@@ -638,8 +638,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = 1;
 		break;
 	case KVM_CAP_SPAPR_RESIZE_HPT:
-		/* Disable this on POWER9 until code handles new HPTE format */
-		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
+		r = !!hv_enabled;
 		break;
 #endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -930,6 +929,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+		u64 gpr)
+{
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+	u32 hi, lo;
+	u32 di;
+
+#ifdef __BIG_ENDIAN
+	hi = gpr >> 32;
+	lo = gpr & 0xffffffff;
+#else
+	lo = gpr >> 32;
+	hi = gpr & 0xffffffff;
+#endif
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
+	if (di > 1)
+		return;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
 #ifdef CONFIG_PPC_FPU
 static inline u64 sp_to_dp(u32 fprs)
 {
@@ -1032,6 +1059,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
 		break;
+#endif
+#ifdef CONFIG_ALTIVEC
+	case KVM_MMIO_REG_VMX:
+		kvmppc_set_vmx_dword(vcpu, gpr);
+		break;
 #endif
 	default:
 		BUG();
@@ -1308,6 +1340,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, int is_default_endian)
+{
+	enum emulation_result emulated;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+				is_default_endian, 0);
+
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+	u32 di;
+	u64 w0, w1;
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
+	if (di > 1)
+		return -1;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	w0 = vrs.u[di * 2];
+	w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+	*val = (w0 << 32) | w1;
+#else
+	*val = (w1 << 32) | w0;
+#endif
+	return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, int is_default_endian)
+{
+	u64 val = 0;
+	enum emulation_result emulated = EMULATE_DONE;
+
+	vcpu->arch.io_gpr = rs;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+			return EMULATE_FAIL;
+
+		emulated = kvmppc_handle_store(run, vcpu, val, 8,
+				is_default_endian);
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+		struct kvm_run *run)
+{
+	enum emulation_result emulated = EMULATE_FAIL;
+	int r;
+
+	vcpu->arch.paddr_accessed += run->mmio.len;
+
+	if (!vcpu->mmio_is_write) {
+		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	} else {
+		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	}
+
+	switch (emulated) {
+	case EMULATE_DO_MMIO:
+		run->exit_reason = KVM_EXIT_MMIO;
+		r = RESUME_HOST;
+		break;
+	case EMULATE_FAIL:
+		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		r = RESUME_HOST;
+		break;
+	default:
+		r = RESUME_GUEST;
+		break;
+	}
+	return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
 	int r = 0;
@@ -1428,6 +1565,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 				goto out;
 			}
 		}
+#endif
+#ifdef CONFIG_ALTIVEC
+		if (vcpu->arch.mmio_vmx_copy_nums > 0)
+			vcpu->arch.mmio_vmx_copy_nums--;
+
+		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+			if (r == RESUME_HOST) {
+				vcpu->mmio_needed = 1;
+				goto out;
+			}
+		}
 #endif
 	} else if (vcpu->arch.osi_needed) {
 		u64 *gprs = run->osi.gprs;