@@ -131,7 +131,7 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
  * Config: M, [MT]
  * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
  * Config2: M
- * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP, CTXTC, ITL, LPA, VEIC,
+ * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
  *      VInt, SP, CDMM, MT, SM, TL]
  * Config4: M, [VTLBSizeExt, MMUSizeExt]
  * Config5: [MRP]
@@ -161,7 +161,7 @@ static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
 static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
 {
         unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
-                MIPS_CONF3_ULRI;
+                MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
 
         /* Permit MSA to be present if MSA is supported */
         if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -1205,6 +1205,13 @@ static u64 kvm_vz_get_one_regs[] = {
         KVM_REG_MIPS_COUNT_HZ,
 };
 
+static u64 kvm_vz_get_one_regs_contextconfig[] = {
+        KVM_REG_MIPS_CP0_CONTEXTCONFIG,
+#ifdef CONFIG_64BIT
+        KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
+#endif
+};
+
 static u64 kvm_vz_get_one_regs_kscratch[] = {
         KVM_REG_MIPS_CP0_KSCRATCH1,
         KVM_REG_MIPS_CP0_KSCRATCH2,
@@ -1225,6 +1232,8 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
                 ++ret;
         if (cpu_guest_has_badinstrp)
                 ++ret;
+        if (cpu_guest_has_contextconfig)
+                ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
         ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
 
         return ret;
@@ -1258,6 +1267,12 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
                         return -EFAULT;
                 ++indices;
         }
+        if (cpu_guest_has_contextconfig) {
+                if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
+                                 sizeof(kvm_vz_get_one_regs_contextconfig)))
+                        return -EFAULT;
+                indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+        }
         for (i = 0; i < 6; ++i) {
                 if (!cpu_guest_has_kscr(i + 2))
                         continue;
@@ -1323,11 +1338,23 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
         case KVM_REG_MIPS_CP0_CONTEXT:
                 *v = (long)read_gc0_context();
                 break;
+        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                *v = read_gc0_contextconfig();
+                break;
         case KVM_REG_MIPS_CP0_USERLOCAL:
                 if (!cpu_guest_has_userlocal)
                         return -EINVAL;
                 *v = read_gc0_userlocal();
                 break;
+#ifdef CONFIG_64BIT
+        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                *v = read_gc0_xcontextconfig();
+                break;
+#endif
         case KVM_REG_MIPS_CP0_PAGEMASK:
                 *v = (long)read_gc0_pagemask();
                 break;
@@ -1478,11 +1505,23 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
         case KVM_REG_MIPS_CP0_CONTEXT:
                 write_gc0_context(v);
                 break;
+        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                write_gc0_contextconfig(v);
+                break;
         case KVM_REG_MIPS_CP0_USERLOCAL:
                 if (!cpu_guest_has_userlocal)
                         return -EINVAL;
                 write_gc0_userlocal(v);
                 break;
+#ifdef CONFIG_64BIT
+        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                write_gc0_xcontextconfig(v);
+                break;
+#endif
         case KVM_REG_MIPS_CP0_PAGEMASK:
                 write_gc0_pagemask(v);
                 break;
@@ -1874,8 +1913,12 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         kvm_restore_gc0_entrylo0(cop0);
         kvm_restore_gc0_entrylo1(cop0);
         kvm_restore_gc0_context(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_restore_gc0_contextconfig(cop0);
 #ifdef CONFIG_64BIT
         kvm_restore_gc0_xcontext(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_restore_gc0_xcontextconfig(cop0);
 #endif
         kvm_restore_gc0_pagemask(cop0);
         kvm_restore_gc0_pagegrain(cop0);
@@ -1933,8 +1976,12 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
         kvm_save_gc0_entrylo0(cop0);
         kvm_save_gc0_entrylo1(cop0);
         kvm_save_gc0_context(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_save_gc0_contextconfig(cop0);
 #ifdef CONFIG_64BIT
         kvm_save_gc0_xcontext(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_save_gc0_xcontextconfig(cop0);
 #endif
         kvm_save_gc0_pagemask(cop0);
         kvm_save_gc0_pagegrain(cop0);
@@ -2298,6 +2345,17 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
                 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
         }
 
+        if (cpu_guest_has_contextconfig) {
+                /* ContextConfig */
+                kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
+#ifdef CONFIG_64BIT
+                /* XContextConfig */
+                /* bits SEGBITS-13+3:4 set */
+                kvm_write_sw_gc0_xcontextconfig(cop0,
+                                        ((1ull << (cpu_vmbits - 13)) - 1) << 4);
+#endif
+        }
+
         /* start with no pending virtual guest interrupts */
         if (cpu_has_guestctl2)
                 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;