@@ -0,0 +1,139 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Supported radix tree geometry.
+ * Like p9, we support either 5 or 9 bits at the first (lowest) level,
+ * for a page size of 64k or 4k.
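+ * The root level must use 13 bits and the two middle levels 9 bits
+ * each, so the tree covers a 52-bit effective address space.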
+ */
+static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
+
+int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+			   struct kvmppc_pte *gpte, bool data, bool iswrite)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u32 pid;
+	int ret, level, ps;
+	__be64 prte, rpte;
+	unsigned long root, pte, index;
+	unsigned long rts, bits, offset;
+	unsigned long gpa;
+	unsigned long proc_tbl_size;
+
+	/* Work out effective PID */
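+	/*
+	 * Quadrant 0 (top two EA bits 0b00) is translated using the vcpu's
+	 * current PID; quadrant 3 (0b11) is translated with PID 0.
+	 * Quadrants 1 and 2 are reserved for hypervisor use, so reject them.
+	 */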
+	switch (eaddr >> 62) {
+	case 0:
+		pid = vcpu->arch.pid;
+		break;
+	case 3:
+		pid = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
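+	/*
+	 * The low bits of the process table register encode the table size
+	 * as 2^(PRTS + 12) bytes; each entry is 16 bytes, so check that
+	 * this PID actually indexes into the table.
+	 */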
+	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
+	if (pid * 16 >= proc_tbl_size)
+		return -EINVAL;
+
+	/* Read process table entry to find root of tree for effective PID */
+	ret = kvm_read_guest(kvm, kvm->arch.process_table + pid * 16,
+			     &prte, sizeof(prte));
+	if (ret)
+		return ret;
+
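+	/*
+	 * Doubleword 0 of the process table entry holds the radix tree
+	 * controls: RTS (tree size, split across two fields), the root
+	 * page directory base (RPDB) and its size (RPDS).
+	 */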
+	root = be64_to_cpu(prte);
+	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+		((root & RTS2_MASK) >> RTS2_SHIFT);
+	bits = root & RPDS_MASK;
+	root = root & RPDB_MASK;
+
+	/* P9 DD1 interprets RTS (radix tree size) differently */
+	offset = rts + 31;
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		offset -= 3;
+
+	/* current implementations only support 52-bit space */
+	if (offset != 52)
+		return -EINVAL;
+
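+	/*
+	 * Walk the tree from the root, consuming 'bits' effective-address
+	 * bits per level, until we reach a leaf PTE or the walk fails.
+	 */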
+	for (level = 3; level >= 0; --level) {
+		if (level && bits != p9_supported_radix_bits[level])
+			return -EINVAL;
+		if (level == 0 && !(bits == 5 || bits == 9))
+			return -EINVAL;
+		offset -= bits;
+		index = (eaddr >> offset) & ((1UL << bits) - 1);
+		/* check that low bits of page table base are zero */
+		if (root & ((1UL << (bits + 3)) - 1))
+			return -EINVAL;
+		ret = kvm_read_guest(kvm, root + index * 8,
+				     &rpte, sizeof(rpte));
+		if (ret)
+			return ret;
+		pte = __be64_to_cpu(rpte);
+		if (!(pte & _PAGE_PRESENT))
+			return -ENOENT;
+		if (pte & _PAGE_PTE)
+			break;
+		bits = pte & 0x1f;
+		root = pte & 0x0fffffffffffff00ul;
+	}
+	/* need a leaf at lowest level; 512GB pages not supported */
+	if (level < 0 || level == 3)
+		return -EINVAL;
+
+	/* offset is now log base 2 of the page size */
+	gpa = pte & 0x01fffffffffff000ul;
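+	/* the address in the PTE must be aligned to the page size */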
+	if (gpa & ((1ul << offset) - 1))
+		return -EINVAL;
+	gpa += eaddr & ((1ul << offset) - 1);
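+	/* find the Linux page-size index corresponding to this shift */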
+	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
+		if (offset == mmu_psize_defs[ps].shift)
+			break;
+	gpte->page_size = ps;
+
+	gpte->eaddr = eaddr;
+	gpte->raddr = gpa;
+
+	/* Work out permissions */
+	gpte->may_read = !!(pte & _PAGE_READ);
+	gpte->may_write = !!(pte & _PAGE_WRITE);
+	gpte->may_execute = !!(pte & _PAGE_EXEC);
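+	/*
+	 * A privileged PTE is not accessible in problem state (MSR[PR]=1).
+	 * In privileged state, access to non-privileged pages can be
+	 * further restricted by the key 0 bits of the AMR and IAMR.
+	 */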
+	if (kvmppc_get_msr(vcpu) & MSR_PR) {
+		if (pte & _PAGE_PRIVILEGED) {
+			gpte->may_read = 0;
+			gpte->may_write = 0;
+			gpte->may_execute = 0;
+		}
+	} else {
+		if (!(pte & _PAGE_PRIVILEGED)) {
+			/* Check AMR/IAMR to see if strict mode is in force */
+			if (vcpu->arch.amr & (1ul << 62))
+				gpte->may_read = 0;
+			if (vcpu->arch.amr & (1ul << 63))
+				gpte->may_write = 0;
+			if (vcpu->arch.iamr & (1ul << 62))
+				gpte->may_execute = 0;
+		}
+	}
+
+	return 0;
+}
+