@@ -411,6 +411,117 @@ static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
 	kvm_write_sw_gc0_compare(cop0, compare);
 }
 
+/**
+ * is_eva_access() - Find whether an instruction is an EVA memory accessor.
+ * @inst:	32-bit instruction encoding.
+ *
+ * Finds whether @inst encodes an EVA memory access instruction, which would
+ * indicate that emulation of it should access the user mode address space
+ * instead of the kernel mode address space. This matters for MUSUK segments
+ * which are TLB mapped for user mode but unmapped for kernel mode.
+ *
+ * Returns:	Whether @inst encodes an EVA accessor instruction.
+ */
+static bool is_eva_access(union mips_instruction inst)
+{
+	if (inst.spec3_format.opcode != spec3_op)
+		return false;
+
+	switch (inst.spec3_format.func) {
+	case lwle_op:
+	case lwre_op:
+	case cachee_op:
+	case sbe_op:
+	case she_op:
+	case sce_op:
+	case swe_op:
+	case swle_op:
+	case swre_op:
+	case prefe_op:
+	case lbue_op:
+	case lhue_op:
+	case lbe_op:
+	case lhe_op:
+	case lle_op:
+	case lwe_op:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * is_eva_am_mapped() - Find whether an access mode is mapped.
+ * @vcpu:	KVM VCPU state.
+ * @am:		3-bit encoded access mode.
+ * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
+ *
+ * Decode @am to find whether it encodes a mapped segment for the current VCPU
+ * state. Where necessary @eu and the actual instruction causing the fault are
+ * taken into account to make the decision.
+ *
+ * Returns:	Whether the VCPU faulted on a TLB mapped address.
+ */
+static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
+{
+	u32 am_lookup;
+	int err;
+
+	/*
+	 * Interpret access control mode. We assume address errors will already
+	 * have been caught by the guest, leaving us with:
+	 *      AM      UM  SM  KM  31..24 23..16
+	 * UK    0 000          Unm   0      0
+	 * MK    1 001          TLB   1
+	 * MSK   2 010      TLB TLB   1
+	 * MUSK  3 011  TLB TLB TLB   1
+	 * MUSUK 4 100  TLB TLB Unm   0      1
+	 * USK   5 101      Unm Unm   0      0
+	 * -     6 110                0      0
+	 * UUSK  7 111  Unm Unm Unm   0      0
+	 *
+	 * We shift a magic value by AM across the sign bit to find if always
+	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
+	 */
+	am_lookup = 0x70080000 << am;
+	if ((s32)am_lookup < 0) {
+		/*
+		 * MK, MSK, MUSK
+		 * Always TLB mapped, unless SegCtl.EU && ERL
+		 */
+		if (!eu || !(read_gc0_status() & ST0_ERL))
+			return true;
+	} else {
+		am_lookup <<= 8;
+		if ((s32)am_lookup < 0) {
+			union mips_instruction inst;
+			unsigned int status;
+			u32 *opc;
+
+			/*
+			 * MUSUK
+			 * TLB mapped if not in kernel mode
+			 */
+			status = read_gc0_status();
+			if (!(status & (ST0_EXL | ST0_ERL)) &&
+			    (status & ST0_KSU))
+				return true;
+			/*
+			 * EVA access instructions in kernel
+			 * mode access user address space.
+			 */
+			opc = (u32 *)vcpu->arch.pc;
+			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
+				opc += 1;
+			err = kvm_get_badinstr(opc, vcpu, &inst.word);
+			if (!err && is_eva_access(inst))
+				return true;
+		}
+	}
+
+	return false;
+}
+
 /**
  * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
  * @vcpu:	KVM VCPU state.
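
The magic constant in is_eva_am_mapped() above packs the two right-hand
columns of the table into one word: bits 31..24 of 0x70080000 form the
"always TLB mapped" column and bits 23..16 the "mapped unless in kernel
mode" column, each indexed by shifting AM up to the sign bit. A standalone
sketch (hypothetical check harness, not part of the patch) that verifies
the encoding against the table:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        static const char *const names[8] = {
            "UK", "MK", "MSK", "MUSK", "MUSUK", "USK", "-", "UUSK"
        };
        unsigned int am;

        for (am = 0; am < 8; am++) {
            uint32_t lookup = 0x70080000u << am;
            /* Sign bit set: MK/MSK/MUSK, always TLB mapped */
            int always = (int32_t)lookup < 0;
            /* Sign bit of the next byte: MUSUK, depends on kernel mode */
            int km_dep = !always && (int32_t)(lookup << 8) < 0;

            printf("%-5s always=%d km_dependent=%d\n",
                   names[am], always, km_dep);
        }
        return 0;
    }
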
@@ -427,10 +538,58 @@ static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 			     unsigned long *gpa)
 {
 	u32 gva32 = gva;
+	unsigned long segctl;
 
 	if ((long)gva == (s32)gva32) {
 		/* Handle canonical 32-bit virtual address */
-		if ((s32)gva32 < (s32)0xc0000000) {
+		if (cpu_guest_has_segments) {
+			unsigned long mask, pa;
+
+			switch (gva32 >> 29) {
+			case 0:
+			case 1: /* CFG5 (1GB) */
+				segctl = read_gc0_segctl2() >> 16;
+				mask = (unsigned long)0xfc0000000ull;
+				break;
+			case 2:
+			case 3: /* CFG4 (1GB) */
+				segctl = read_gc0_segctl2();
+				mask = (unsigned long)0xfc0000000ull;
+				break;
+			case 4: /* CFG3 (512MB) */
+				segctl = read_gc0_segctl1() >> 16;
+				mask = (unsigned long)0xfe0000000ull;
+				break;
+			case 5: /* CFG2 (512MB) */
+				segctl = read_gc0_segctl1();
+				mask = (unsigned long)0xfe0000000ull;
+				break;
+			case 6: /* CFG1 (512MB) */
+				segctl = read_gc0_segctl0() >> 16;
+				mask = (unsigned long)0xfe0000000ull;
+				break;
+			case 7: /* CFG0 (512MB) */
+				segctl = read_gc0_segctl0();
+				mask = (unsigned long)0xfe0000000ull;
+				break;
+			default:
+				/*
+				 * GCC 4.9 isn't smart enough to figure out that
+				 * segctl and mask are always initialised.
+				 */
+				unreachable();
+			}
+
+			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
+					     segctl & 0x0008))
+				goto tlb_mapped;
+
+			/* Unmapped, find guest physical address */
+			pa = (segctl << 20) & mask;
+			pa |= gva32 & ~mask;
+			*gpa = pa;
+			return 0;
+		} else if ((s32)gva32 < (s32)0xc0000000) {
 			/* legacy unmapped KSeg0 or KSeg1 */
 			*gpa = gva32 & 0x1fffffff;
 			return 0;
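
For an unmapped segment the guest physical address is assembled from the
CFG's PA field (bits 15:9 of the selected 16-bit half-register), which the
shift by 20 places at physical bits 35:29, plus the in-segment offset from
the GVA. A minimal sketch of that arithmetic, with hypothetical names and
standalone types:

    #include <stdint.h>

    /*
     * Mirrors the unmapped path above: "cfg" is the selected 16-bit
     * segment configuration, "mask" is 0xfe0000000 for 512MB segments
     * or 0xfc0000000 for 1GB segments.
     */
    static uint64_t eva_unmapped_gpa(uint64_t cfg, uint32_t gva32,
                                     uint64_t mask)
    {
        uint64_t pa;

        pa = (cfg << 20) & mask;    /* CFG.PA -> physical bits 35:29 */
        pa |= gva32 & ~mask;        /* offset within the segment */
        return pa;
    }

With the legacy layout programmed in kvm_vz_vcpu_setup() below, CFG3.PA is
0 for kseg0, so eva_unmapped_gpa(cca, 0x80001000, 0xfe0000000ull) yields
guest PA 0x1000, matching the traditional kseg0 mapping.
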
@@ -438,6 +597,20 @@ static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 #ifdef CONFIG_64BIT
 	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
 		/* XKPHYS */
+		if (cpu_guest_has_segments) {
+			/*
+			 * Each of the 8 regions can be overridden by SegCtl2.XR
+			 * to use SegCtl1.XAM.
+			 */
+			segctl = read_gc0_segctl2();
+			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
+				segctl = read_gc0_segctl1();
+				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
+						     0))
+					goto tlb_mapped;
+			}
+
+		}
 		/*
 		 * Traditionally fully unmapped.
 		 * Bits 61:59 specify the CCA, which we can just mask off here.
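
SegCtl2 keeps one XR override bit per XKPHYS region in bits 63:56,
selected by the region number in GVA bits 61:59; when the bit is set, the
region's access mode comes from SegCtl1.XAM (bits 61:59), and the patch
passes @eu as 0 for these regions. A short sketch of the bit selection,
with hypothetical helper names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Is this XKPHYS region redirected to SegCtl1.XAM by SegCtl2.XR? */
    static bool xkphys_overridden(uint64_t segctl2, uint64_t gva)
    {
        unsigned int region = (gva >> 59) & 0x7;  /* GVA bits 61:59 */

        return segctl2 & (1ull << (56 + region)); /* XR bits 63:56 */
    }

    /* Access mode applied to an overridden XKPHYS region */
    static unsigned int xkphys_xam(uint64_t segctl1)
    {
        return (segctl1 >> 59) & 0x7;             /* SegCtl1.XAM */
    }
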
@@ -449,6 +622,7 @@ static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 #endif
 	}
 
+tlb_mapped:
 	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
 }
 
@@ -1212,6 +1386,12 @@ static u64 kvm_vz_get_one_regs_contextconfig[] = {
 #endif
 };
 
+static u64 kvm_vz_get_one_regs_segments[] = {
+	KVM_REG_MIPS_CP0_SEGCTL0,
+	KVM_REG_MIPS_CP0_SEGCTL1,
+	KVM_REG_MIPS_CP0_SEGCTL2,
+};
+
 static u64 kvm_vz_get_one_regs_kscratch[] = {
 	KVM_REG_MIPS_CP0_KSCRATCH1,
 	KVM_REG_MIPS_CP0_KSCRATCH2,
@@ -1234,6 +1414,8 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
 		++ret;
 	if (cpu_guest_has_contextconfig)
 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+	if (cpu_guest_has_segments)
+		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
 	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
 
 	return ret;
@@ -1273,6 +1455,12 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 			return -EFAULT;
 		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
 	}
+	if (cpu_guest_has_segments) {
+		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
+				 sizeof(kvm_vz_get_one_regs_segments)))
+			return -EFAULT;
+		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+	}
 	for (i = 0; i < 6; ++i) {
 		if (!cpu_guest_has_kscr(i + 2))
 			continue;
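
The new indices become visible to userspace via KVM_GET_REG_LIST. A hedged
userspace sketch of probing for them (error handling condensed; assumes the
KVM_REG_MIPS_CP0_SEGCTL0 ID that this series exports through the uapi
headers):

    #include <linux/kvm.h>
    #include <asm/kvm.h>
    #include <sys/ioctl.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Does this vCPU expose SegCtl0 in its one-reg list? */
    static bool has_segctl(int vcpu_fd)
    {
        struct kvm_reg_list probe = { .n = 0 }, *list;
        bool found = false;
        uint64_t i;

        /* First call fails with E2BIG but reports the register count. */
        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
        list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
        list->n = probe.n;
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
            for (i = 0; i < list->n; i++)
                if (list->reg[i] == KVM_REG_MIPS_CP0_SEGCTL0)
                    found = true;
        free(list);
        return found;
    }
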
@@ -1361,6 +1549,21 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_PAGEGRAIN:
 		*v = (long)read_gc0_pagegrain();
 		break;
+	case KVM_REG_MIPS_CP0_SEGCTL0:
+		if (!cpu_guest_has_segments)
+			return -EINVAL;
+		*v = read_gc0_segctl0();
+		break;
+	case KVM_REG_MIPS_CP0_SEGCTL1:
+		if (!cpu_guest_has_segments)
+			return -EINVAL;
+		*v = read_gc0_segctl1();
+		break;
+	case KVM_REG_MIPS_CP0_SEGCTL2:
+		if (!cpu_guest_has_segments)
+			return -EINVAL;
+		*v = read_gc0_segctl2();
+		break;
 	case KVM_REG_MIPS_CP0_WIRED:
 		*v = (long)read_gc0_wired();
 		break;
@@ -1528,6 +1731,21 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_PAGEGRAIN:
 		write_gc0_pagegrain(v);
 		break;
+	case KVM_REG_MIPS_CP0_SEGCTL0:
+		if (!cpu_guest_has_segments)
+			return -EINVAL;
+		write_gc0_segctl0(v);
+		break;
+	case KVM_REG_MIPS_CP0_SEGCTL1:
+		if (!cpu_guest_has_segments)
+			return -EINVAL;
+		write_gc0_segctl1(v);
+		break;
+	case KVM_REG_MIPS_CP0_SEGCTL2:
+		if (!cpu_guest_has_segments)
+			return -EINVAL;
+		write_gc0_segctl2(v);
+		break;
 	case KVM_REG_MIPS_CP0_WIRED:
 		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
 		break;
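
With the SegCtl registers enumerated and handled above, userspace can save,
restore, and migrate them like any other CP0 register through the one-reg
API. A hedged sketch (the ioctl fails with EINVAL when the virtualized CPU
lacks segmentation support, matching the checks above):

    #include <linux/kvm.h>
    #include <asm/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    /* Read the guest's SegCtl0 from an existing vCPU file descriptor. */
    static int get_segctl0(int vcpu_fd, uint64_t *val)
    {
        struct kvm_one_reg reg = {
            .id = KVM_REG_MIPS_CP0_SEGCTL0,
            .addr = (uintptr_t)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }
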
@@ -1955,6 +2173,12 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (cpu_guest_has_badinstrp)
 		kvm_restore_gc0_badinstrp(cop0);
 
+	if (cpu_guest_has_segments) {
+		kvm_restore_gc0_segctl0(cop0);
+		kvm_restore_gc0_segctl1(cop0);
+		kvm_restore_gc0_segctl2(cop0);
+	}
+
 	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
 	if (cpu_has_guestctl2)
 		write_c0_guestctl2(
@@ -2038,6 +2262,12 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 	if (cpu_guest_has_badinstrp)
 		kvm_save_gc0_badinstrp(cop0);
 
+	if (cpu_guest_has_segments) {
+		kvm_save_gc0_segctl0(cop0);
+		kvm_save_gc0_segctl1(cop0);
+		kvm_save_gc0_segctl2(cop0);
+	}
+
 	kvm_vz_save_timer(vcpu);
 
 	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
@@ -2356,6 +2586,16 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
 #endif
 	}
 
+	/* Implementation dependent, use the legacy layout */
+	if (cpu_guest_has_segments) {
+		/* SegCtl0, SegCtl1, SegCtl2 */
+		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
+		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
+				(_page_cachable_default >> _CACHE_SHIFT) <<
+						(16 + MIPS_SEGCFG_C_SHIFT));
+		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
+	}
+
 	/* start with no pending virtual guest interrupts */
 	if (cpu_has_guestctl2)
 		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;