@@ -11,6 +11,7 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
 	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
 	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
+	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
+	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
+	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = {
 	KVM_REG_MIPS_CP0_STATUS,
 	KVM_REG_MIPS_CP0_CAUSE,
 	KVM_REG_MIPS_CP0_EPC,
+	KVM_REG_MIPS_CP0_PRID,
 	KVM_REG_MIPS_CP0_CONFIG,
 	KVM_REG_MIPS_CP0_CONFIG1,
 	KVM_REG_MIPS_CP0_CONFIG2,
 	KVM_REG_MIPS_CP0_CONFIG3,
+	KVM_REG_MIPS_CP0_CONFIG4,
+	KVM_REG_MIPS_CP0_CONFIG5,
 	KVM_REG_MIPS_CP0_CONFIG7,
 	KVM_REG_MIPS_CP0_ERROREPC,
 
@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 	int ret;
 	s64 v;
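+	/* vs[] carries a 128-bit (KVM_REG_SIZE_U128) value as two 64-bit halves */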
+	s64 vs[2];
+	unsigned int idx;
 
 	switch (reg->id) {
+	/* General purpose registers */
 	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
 		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
 		break;
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 		v = (long)vcpu->arch.pc;
 		break;
 
+	/* Floating point registers */
+	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+		/* Odd singles in top of even double when FR=0 */
+		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+			v = get_fpr32(&fpu->fpr[idx], 0);
+		else
+			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
+		break;
+	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+		/* Can't access odd doubles in FR=0 mode */
+		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+			return -EINVAL;
+		v = get_fpr64(&fpu->fpr[idx], 0);
+		break;
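+	/* FIR is read-only; report the host FPU's implementation register */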
+	case KVM_REG_MIPS_FCR_IR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		v = boot_cpu_data.fpu_id;
+		break;
+	case KVM_REG_MIPS_FCR_CSR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		v = fpu->fcr31;
+		break;
+
+	/* MIPS SIMD Architecture (MSA) registers */
+	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		/* Can't access MSA registers in FR=0 mode */
+		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+		/* least significant byte first */
+		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
+		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
+#else
+		/* most significant byte first */
+		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
+		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
+#endif
+		break;
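+	/* MSAIR is read-only; report the host's MSA implementation register */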
+	case KVM_REG_MIPS_MSA_IR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		v = boot_cpu_data.msa_id;
+		break;
+	case KVM_REG_MIPS_MSA_CSR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		v = fpu->msacsr;
+		break;
+
+	/* Co-processor 0 registers */
 	case KVM_REG_MIPS_CP0_INDEX:
 		v = (long)kvm_read_c0_guest_index(cop0);
 		break;
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_EPC:
 		v = (long)kvm_read_c0_guest_epc(cop0);
 		break;
-	case KVM_REG_MIPS_CP0_ERROREPC:
-		v = (long)kvm_read_c0_guest_errorepc(cop0);
+	case KVM_REG_MIPS_CP0_PRID:
+		v = (long)kvm_read_c0_guest_prid(cop0);
 		break;
 	case KVM_REG_MIPS_CP0_CONFIG:
 		v = (long)kvm_read_c0_guest_config(cop0);
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_CONFIG3:
 		v = (long)kvm_read_c0_guest_config3(cop0);
 		break;
+	case KVM_REG_MIPS_CP0_CONFIG4:
+		v = (long)kvm_read_c0_guest_config4(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG5:
+		v = (long)kvm_read_c0_guest_config5(cop0);
+		break;
 	case KVM_REG_MIPS_CP0_CONFIG7:
 		v = (long)kvm_read_c0_guest_config7(cop0);
 		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		v = (long)kvm_read_c0_guest_errorepc(cop0);
+		break;
 	/* registers to be handled specially */
 	case KVM_REG_MIPS_CP0_COUNT:
 	case KVM_REG_MIPS_COUNT_CTL:
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 		u32 v32 = (u32)v;
 
 		return put_user(v32, uaddr32);
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+
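+		/* no 128-bit put_user(), so copy out the two halves staged in vs[] */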
+		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
 	} else {
 		return -EINVAL;
 	}
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	u64 v;
+	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+	s64 v;
+	s64 vs[2];
+	unsigned int idx;
 
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 		if (get_user(v32, uaddr32) != 0)
 			return -EFAULT;
 		v = (s64)v32;
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+
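+		/* stage the 128-bit value in vs[]; the switch below commits it */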
+		if (copy_from_user(vs, uaddr, 16))
+			return -EFAULT;
 	} else {
 		return -EINVAL;
 	}
 
 	switch (reg->id) {
+	/* General purpose registers */
 	case KVM_REG_MIPS_R0:
 		/* Silently ignore requests to set $0 */
 		break;
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 		vcpu->arch.pc = v;
 		break;
 
+	/* Floating point registers */
+	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+		/* Odd singles in top of even double when FR=0 */
+		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+			set_fpr32(&fpu->fpr[idx], 0, v);
+		else
+			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
+		break;
+	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+		/* Can't access odd doubles in FR=0 mode */
+		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+			return -EINVAL;
+		set_fpr64(&fpu->fpr[idx], 0, v);
+		break;
+	case KVM_REG_MIPS_FCR_IR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
+		/* Read-only */
+		break;
+	case KVM_REG_MIPS_FCR_CSR:
+		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+			return -EINVAL;
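+		/*
+		 * Cause bits in the written value may raise an FP exception
+		 * when FCSR is restored; see kvm_mips_csr_die_notify().
+		 */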
+		fpu->fcr31 = v;
+		break;
+
+	/* MIPS SIMD Architecture (MSA) registers */
+	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+		/* least significant byte first */
+		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
+		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
+#else
+		/* most significant byte first */
+		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
+		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
+#endif
+		break;
+	case KVM_REG_MIPS_MSA_IR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		/* Read-only */
+		break;
+	case KVM_REG_MIPS_MSA_CSR:
+		if (!kvm_mips_guest_has_msa(&vcpu->arch))
+			return -EINVAL;
+		fpu->msacsr = v;
+		break;
+
+	/* Co-processor 0 registers */
 	case KVM_REG_MIPS_CP0_INDEX:
 		kvm_write_c0_guest_index(cop0, v);
 		break;
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_EPC:
 		kvm_write_c0_guest_epc(cop0, v);
 		break;
+	case KVM_REG_MIPS_CP0_PRID:
+		kvm_write_c0_guest_prid(cop0, v);
+		break;
 	case KVM_REG_MIPS_CP0_ERROREPC:
 		kvm_write_c0_guest_errorepc(cop0, v);
 		break;
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_COUNT:
 	case KVM_REG_MIPS_CP0_COMPARE:
 	case KVM_REG_MIPS_CP0_CAUSE:
+	case KVM_REG_MIPS_CP0_CONFIG:
+	case KVM_REG_MIPS_CP0_CONFIG1:
+	case KVM_REG_MIPS_CP0_CONFIG2:
+	case KVM_REG_MIPS_CP0_CONFIG3:
+	case KVM_REG_MIPS_CP0_CONFIG4:
+	case KVM_REG_MIPS_CP0_CONFIG5:
 	case KVM_REG_MIPS_COUNT_CTL:
 	case KVM_REG_MIPS_COUNT_RESUME:
 	case KVM_REG_MIPS_COUNT_HZ:
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+				     struct kvm_enable_cap *cap)
+{
+	int r = 0;
+
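+	/* the capability must be one we advertise, and takes no flags or arguments */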
+	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
+		return -EINVAL;
+	if (cap->flags)
+		return -EINVAL;
+	if (cap->args[0])
+		return -EINVAL;
+
+	switch (cap->cap) {
+	case KVM_CAP_MIPS_FPU:
+		vcpu->arch.fpu_enabled = true;
+		break;
+	case KVM_CAP_MIPS_MSA:
+		vcpu->arch.msa_enabled = true;
+		break;
+	default:
+		r = -EINVAL;
+		break;
+	}
+
+	return r;
+}
+
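+/*
+ * Userland enables these capabilities per vcpu via KVM_ENABLE_CAP, e.g.
+ * (sketch; vcpu_fd being the vcpu file descriptor):
+ *
+ *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
+ *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
+ */
+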
 long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 			 unsigned long arg)
 {
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 		break;
 	}
+	case KVM_ENABLE_CAP: {
+		struct kvm_enable_cap cap;
+
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			goto out;
+		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+		break;
+	}
 	default:
 		r = -ENOIOCTLCMD;
 	}
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 
 	switch (ext) {
 	case KVM_CAP_ONE_REG:
+	case KVM_CAP_ENABLE_CAP:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+	case KVM_CAP_MIPS_FPU:
+		r = !!cpu_has_fpu;
+		break;
+	case KVM_CAP_MIPS_MSA:
+		/*
+		 * We don't support MSA vector partitioning yet:
+		 * 1) It would require explicit support which can't be tested
+		 *    yet due to lack of support in current hardware.
+		 * 2) It extends the state that would need to be saved/restored
+		 *    by e.g. QEMU for migration.
+		 *
+		 * When vector partitioning hardware becomes available, support
+		 * could be added by requiring a flag when enabling
+		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
+		 * to save/restore the appropriate extra state.
+		 */
+		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
+		break;
 	default:
 		r = 0;
 		break;
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ret = kvm_mips_callbacks->handle_break(vcpu);
 		break;
 
+	case T_TRAP:
+		++vcpu->stat.trap_inst_exits;
+		trace_kvm_exit(vcpu, TRAP_INST_EXITS);
+		ret = kvm_mips_callbacks->handle_trap(vcpu);
+		break;
+
+	case T_MSAFPE:
+		++vcpu->stat.msa_fpe_exits;
+		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
+		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+		break;
+
+	case T_FPE:
+		++vcpu->stat.fpe_exits;
+		trace_kvm_exit(vcpu, FPE_EXITS);
+		ret = kvm_mips_callbacks->handle_fpe(vcpu);
+		break;
+
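+	/* either lazily enable MSA for the guest or deliver the exception to it */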
+	case T_MSADIS:
+		++vcpu->stat.msa_disabled_exits;
+		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
+		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+		break;
+
 	default:
 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
 			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
@@ -1146,12 +1386,233 @@ skip_emul:
 		}
 	}
 
+	if (ret == RESUME_GUEST) {
+		/*
+		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+		 * is live), restore FCR31 / MSACSR.
+		 *
+		 * This should be before returning to the guest exception
+		 * vector, as it may well cause an [MSA] FP exception if there
+		 * are pending exception bits unmasked. (see
+		 * kvm_mips_csr_die_notifier() for how that is handled).
+		 */
+		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
+		    read_c0_status() & ST0_CU1)
+			__kvm_restore_fcsr(&vcpu->arch);
+
+		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+		    read_c0_config5() & MIPS_CONF5_MSAEN)
+			__kvm_restore_msacsr(&vcpu->arch);
+	}
+
 	/* Disable HTW before returning to guest or host */
 	htw_stop();
 
 	return ret;
 }
 
+/* Enable FPU for guest and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned int sr, cfg5;
+
+	preempt_disable();
+
+	sr = kvm_read_c0_guest_status(cop0);
+
+	/*
+	 * If MSA state is already live, it is undefined how it interacts with
+	 * FR=0 FPU state, and we don't want to hit reserved instruction
+	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+	 * play it safe and save it first.
+	 *
+	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+	 * get called when guest CU1 is set, however we can't trust the guest
+	 * not to clobber the status register directly via the commpage.
+	 */
+	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+		kvm_lose_fpu(vcpu);
+
+	/*
+	 * Enable FPU for guest
+	 * We set FR and FRE according to guest context
+	 */
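+	/* change_c0_status() rewrites only the ST0_CU1 and ST0_FR bits, from sr */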
+	change_c0_status(ST0_CU1 | ST0_FR, sr);
+	if (cpu_has_fre) {
+		cfg5 = kvm_read_c0_guest_config5(cop0);
+		change_c0_config5(MIPS_CONF5_FRE, cfg5);
+	}
+	enable_fpu_hazard();
+
+	/* If guest FPU state not active, restore it now */
+	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+		__kvm_restore_fpu(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+	}
+
+	preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned int sr, cfg5;
+
+	preempt_disable();
+
+	/*
+	 * Enable FPU if enabled in guest, since we're restoring FPU context
+	 * anyway. We set FR and FRE according to guest context.
+	 */
+	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+		sr = kvm_read_c0_guest_status(cop0);
+
+		/*
+		 * If FR=0 FPU state is already live, it is undefined how it
+		 * interacts with MSA state, so play it safe and save it first.
+		 */
+		if (!(sr & ST0_FR) &&
+		    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
+				KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+			kvm_lose_fpu(vcpu);
+
+		change_c0_status(ST0_CU1 | ST0_FR, sr);
+		if (sr & ST0_CU1 && cpu_has_fre) {
+			cfg5 = kvm_read_c0_guest_config5(cop0);
+			change_c0_config5(MIPS_CONF5_FRE, cfg5);
+		}
+	}
+
+	/* Enable MSA for guest */
+	set_c0_config5(MIPS_CONF5_MSAEN);
+	enable_fpu_hazard();
+
+	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
+	case KVM_MIPS_FPU_FPU:
+		/*
+		 * Guest FPU state already loaded, only restore upper MSA state
+		 */
+		__kvm_restore_msa_upper(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		break;
+	case 0:
+		/* Neither FPU nor MSA already active, restore full MSA state */
+		__kvm_restore_msa(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		if (kvm_mips_guest_has_fpu(&vcpu->arch))
+			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+		break;
+	default:
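+		/* MSA context already live, nothing to restore */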
+		break;
+	}
+
+	preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
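+/*
+ * The state is discarded rather than saved, e.g. when a guest write to
+ * Status.FR has made the register state architecturally UNPREDICTABLE.
+ */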
+void kvm_drop_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+		disable_msa();
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+	}
+	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		clear_c0_status(ST0_CU1 | ST0_FR);
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+	}
+	preempt_enable();
+}
+
+/* Save and disable FPU & MSA */
+void kvm_lose_fpu(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * FPU & MSA get disabled in root context (hardware) when they are
+	 * disabled in guest context (software), but the register state in the
+	 * hardware may still be in use. This is why we explicitly re-enable the
+	 * hardware before saving.
+	 */
+
+	preempt_disable();
+	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+		set_c0_config5(MIPS_CONF5_MSAEN);
+		enable_fpu_hazard();
+
+		__kvm_save_msa(&vcpu->arch);
+
+		/* Disable MSA & FPU */
+		disable_msa();
+		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+			clear_c0_status(ST0_CU1 | ST0_FR);
+		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
+	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		set_c0_status(ST0_CU1);
+		enable_fpu_hazard();
+
+		__kvm_save_fpu(&vcpu->arch);
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+
+		/* Disable FPU */
+		clear_c0_status(ST0_CU1 | ST0_FR);
+	}
+	preempt_enable();
+}
+
+/*
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
+ */
+static int kvm_mips_csr_die_notify(struct notifier_block *self,
+				   unsigned long cmd, void *ptr)
+{
+	struct die_args *args = (struct die_args *)ptr;
+	struct pt_regs *regs = args->regs;
+	unsigned long pc;
+
+	/* Only interested in FPE and MSAFPE */
+	if (cmd != DIE_FP && cmd != DIE_MSAFP)
+		return NOTIFY_DONE;
+
+	/* Return immediately if guest context isn't active */
+	if (!(current->flags & PF_VCPU))
+		return NOTIFY_DONE;
+
+	/* Should never get here from user mode */
+	BUG_ON(user_mode(regs));
+
+	pc = instruction_pointer(regs);
+	switch (cmd) {
+	case DIE_FP:
+		/* match 2nd instruction in __kvm_restore_fcsr */
+		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
+			return NOTIFY_DONE;
+		break;
+	case DIE_MSAFP:
+		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
+		if (!cpu_has_msa ||
+		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
+			return NOTIFY_DONE;
+		break;
+	}
+
+	/* Move PC forward a little and continue executing */
+	instruction_pointer(regs) += 4;
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block kvm_mips_csr_die_notifier = {
+	.notifier_call = kvm_mips_csr_die_notify,
+};
+
 int __init kvm_mips_init(void)
 {
 	int ret;
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void)
 	if (ret)
 		return ret;
 
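+	/* step over FP exceptions raised while restoring guest FCSR/MSACSR */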
+	register_die_notifier(&kvm_mips_csr_die_notifier);
+
 	/*
 	 * On MIPS, kernel modules are executed from "mapped space", which
 	 * requires TLBs. The TLB handling code is statically linked with
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void)
 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
 	kvm_mips_is_error_pfn = is_error_pfn;
 
-	pr_info("KVM/MIPS Initialized\n");
 	return 0;
 }
 
@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void)
 	kvm_mips_release_pfn_clean = NULL;
 	kvm_mips_is_error_pfn = NULL;
 
-	pr_info("KVM/MIPS unloaded\n");
+	unregister_die_notifier(&kvm_mips_csr_die_notifier);
 }
 
 module_init(kvm_mips_init);