@@ -52,7 +52,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		goto unaligned;
 
 	/* Read the instruction */
-	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+	insn.word = kvm_get_inst((u32 *) epc, vcpu);
 
 	if (insn.word == KVM_INVALID_INST)
 		return KVM_INVALID_INST;
@@ -161,9 +161,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 			nextpc = epc;
 		break;
 
-	case blez_op:		/* not really i_format */
-	case blezl_op:
-		/* rt field assumed to be zero */
+	case blez_op:	/* POP06 */
+#ifndef CONFIG_CPU_MIPSR6
+	case blezl_op:	/* removed in R6 */
+#endif
+		if (insn.i_format.rt != 0)
+			goto compact_branch;
 		if ((long)arch->gprs[insn.i_format.rs] <= 0)
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
 		else
@@ -171,9 +174,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 			nextpc = epc;
 		break;
 
-	case bgtz_op:
-	case bgtzl_op:
-		/* rt field assumed to be zero */
+	case bgtz_op:	/* POP07 */
+#ifndef CONFIG_CPU_MIPSR6
+	case bgtzl_op:	/* removed in R6 */
+#endif
+		if (insn.i_format.rt != 0)
+			goto compact_branch;
 		if ((long)arch->gprs[insn.i_format.rs] > 0)
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
 		else
@@ -185,6 +191,40 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 	case cop1_op:
 		kvm_err("%s: unsupported cop1_op\n", __func__);
 		break;
+
+#ifdef CONFIG_CPU_MIPSR6
+	/* R6 added the following compact branches with forbidden slots */
+	case blezl_op:	/* POP26 */
+	case bgtzl_op:	/* POP27 */
+		/* only rt == 0 isn't compact branch */
+		if (insn.i_format.rt != 0)
+			goto compact_branch;
+		break;
+	case pop10_op:
+	case pop30_op:
+		/* only rs == rt == 0 is reserved, rest are compact branches */
+		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
+			goto compact_branch;
+		break;
+	case pop66_op:
+	case pop76_op:
+		/* only rs == 0 isn't compact branch */
+		if (insn.i_format.rs != 0)
+			goto compact_branch;
+		break;
+compact_branch:
+		/*
+		 * If we've hit an exception on the forbidden slot, then
+		 * the branch must not have been taken.
+		 */
+		epc += 8;
+		nextpc = epc;
+		break;
+#else
+compact_branch:
+		/* Compact branches not supported before R6 */
+		break;
+#endif
 	}
 
 	return nextpc;
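A note on the hunks above: pre-R6 blez/bgtz ignore the rt field, while R6 reuses the rt != 0 encodings as compact branches (POP06/POP07) whose following instruction is a forbidden slot rather than a delay slot. A taken compact branch never executes that slot, so a fault there means the branch fell through, hence epc += 8. A minimal sketch of the two PC computations (plain C; names and types here are illustrative, not kernel API):

#include <stdint.h>

/* Taken delay-slot branch (e.g. blez): target is relative to the
 * delay slot at epc + 4, plus the signed 16-bit offset scaled by 4. */
static uint64_t taken_delay_slot_branch(uint64_t epc, int16_t simmediate)
{
	return epc + 4 + ((int32_t)simmediate << 2);
}

/* R6 compact branch whose forbidden slot faulted: the branch cannot
 * have been taken, so execution resumes after the forbidden slot. */
static uint64_t untaken_compact_branch(uint64_t epc)
{
	return epc + 8;	/* branch word + forbidden slot word */
}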
@@ -198,7 +238,7 @@ sigill:
 	return nextpc;
 }
 
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
 {
 	unsigned long branch_pc;
 	enum emulation_result er = EMULATE_DONE;
@@ -243,7 +283,7 @@ static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
  *
  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
  */
-static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
 {
 	s64 now_ns, periods;
 	u64 delta;
@@ -300,11 +340,11 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
  *
  * Returns:	The current value of the guest CP0_Count register.
  */
-static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	ktime_t expires, threshold;
-	uint32_t count, compare;
+	u32 count, compare;
 	int running;
 
 	/* Calculate the biased and scaled guest CP0_Count */
@@ -315,7 +355,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 	 * Find whether CP0_Count has reached the closest timer interrupt. If
 	 * not, we shouldn't inject it.
 	 */
-	if ((int32_t)(count - compare) < 0)
+	if ((s32)(count - compare) < 0)
 		return count;
 
 	/*
@@ -360,7 +400,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
  *
  * Returns:	The current guest CP0_Count value.
  */
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 
@@ -387,8 +427,7 @@ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
 *
 * Returns:	The ktime at the point of freeze.
 */
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
-				       uint32_t *count)
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
 {
 	ktime_t now;
 
@@ -419,16 +458,16 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
  */
 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
-				    ktime_t now, uint32_t count)
+				    ktime_t now, u32 count)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	uint32_t compare;
+	u32 compare;
 	u64 delta;
 	ktime_t expire;
 
 	/* Calculate timeout (wrap 0 to 2^32) */
 	compare = kvm_read_c0_guest_compare(cop0);
-	delta = (u64)(uint32_t)(compare - count - 1) + 1;
+	delta = (u64)(u32)(compare - count - 1) + 1;
 	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
 	expire = ktime_add_ns(now, delta);
 
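The delta computation in the hunk above relies on modulo-2^32 wraparound: (u32)(compare - count - 1) + 1 is the number of Count ticks until the next Compare match, with compare == count deliberately mapping to a full 2^32-tick period (the "wrap 0 to 2^32" comment). A standalone check of that arithmetic (plain C sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Ticks until CP0_Count next equals CP0_Compare, with wraparound. */
static uint64_t ticks_to_compare(uint32_t compare, uint32_t count)
{
	return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}

int main(void)
{
	assert(ticks_to_compare(100, 90) == 10);
	assert(ticks_to_compare(5, 0xfffffffb) == 10);	  /* wraps past 0 */
	assert(ticks_to_compare(42, 42) == (1ULL << 32)); /* full period */
	return 0;
}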
@@ -444,7 +483,7 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
  *
  * Sets the CP0_Count value and updates the timer accordingly.
  */
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	ktime_t now;
@@ -538,13 +577,13 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
  * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
  * any pending timer interrupt is preserved.
  */
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	int dc;
 	u32 old_compare = kvm_read_c0_guest_compare(cop0);
 	ktime_t now;
-	uint32_t count;
+	u32 count;
 
 	/* if unchanged, must just be an ack */
 	if (old_compare == compare) {
@@ -585,7 +624,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	uint32_t count;
+	u32 count;
 	ktime_t now;
 
 	/* Stop hrtimer */
@@ -632,7 +671,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	uint32_t count;
+	u32 count;
 
 	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
 
@@ -661,7 +700,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
 	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
 	s64 delta;
 	ktime_t expire, now;
-	uint32_t count, compare;
+	u32 count, compare;
 
 	/* Only allow defined bits to be changed */
 	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
@@ -687,7 +726,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
 		 */
 		count = kvm_read_c0_guest_count(cop0);
 		compare = kvm_read_c0_guest_compare(cop0);
-		delta = (u64)(uint32_t)(compare - count - 1) + 1;
+		delta = (u64)(u32)(compare - count - 1) + 1;
 		delta = div_u64(delta * NSEC_PER_SEC,
 				vcpu->arch.count_hz);
 		expire = ktime_add_ns(vcpu->arch.count_resume, delta);
@@ -776,7 +815,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		  vcpu->arch.pending_exceptions);
 
 	++vcpu->stat.wait_exits;
-	trace_kvm_exit(vcpu, WAIT_EXITS);
+	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
 	if (!vcpu->arch.pending_exceptions) {
 		vcpu->arch.wait = 1;
 		kvm_vcpu_block(vcpu);
@@ -801,9 +840,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	uint32_t pc = vcpu->arch.pc;
+	unsigned long pc = vcpu->arch.pc;
 
-	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+	kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
 	return EMULATE_FAIL;
 }
 
@@ -813,11 +852,11 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	int index = kvm_read_c0_guest_index(cop0);
 	struct kvm_mips_tlb *tlb = NULL;
-	uint32_t pc = vcpu->arch.pc;
+	unsigned long pc = vcpu->arch.pc;
 
 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
 		kvm_debug("%s: illegal index: %d\n", __func__, index);
-		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
 			  pc, index, kvm_read_c0_guest_entryhi(cop0),
 			  kvm_read_c0_guest_entrylo0(cop0),
 			  kvm_read_c0_guest_entrylo1(cop0),
@@ -834,10 +873,10 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
-	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
-	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
 		  pc, index, kvm_read_c0_guest_entryhi(cop0),
 		  kvm_read_c0_guest_entrylo0(cop0),
 		  kvm_read_c0_guest_entrylo1(cop0),
@@ -851,7 +890,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_mips_tlb *tlb = NULL;
-	uint32_t pc = vcpu->arch.pc;
+	unsigned long pc = vcpu->arch.pc;
 	int index;
 
 	get_random_bytes(&index, sizeof(index));
@@ -867,10 +906,10 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
-	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
-	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
 		  pc, index, kvm_read_c0_guest_entryhi(cop0),
 		  kvm_read_c0_guest_entrylo0(cop0),
 		  kvm_read_c0_guest_entrylo1(cop0));
@@ -882,14 +921,14 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	long entryhi = kvm_read_c0_guest_entryhi(cop0);
-	uint32_t pc = vcpu->arch.pc;
+	unsigned long pc = vcpu->arch.pc;
 	int index = -1;
 
 	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 
 	kvm_write_c0_guest_index(cop0, index);
 
-	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
 		  index);
 
 	return EMULATE_DONE;
@@ -922,8 +961,8 @@ unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
  */
 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
 {
-	/* Config4 is optional */
-	unsigned int mask = MIPS_CONF_M;
+	/* Config4 and ULRI are optional */
+	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
 
 	/* Permit MSA to be present if MSA is supported */
 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -942,7 +981,12 @@ unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
 {
 	/* Config5 is optional */
-	return MIPS_CONF_M;
+	unsigned int mask = MIPS_CONF_M;
+
+	/* KScrExist */
+	mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
+
+	return mask;
 }
 
 /**
@@ -973,14 +1017,14 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
 	return mask;
 }
 
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
-					   uint32_t cause, struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+					   u32 *opc, u32 cause,
+					   struct kvm_run *run,
 					   struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
-	int32_t rt, rd, copz, sel, co_bit, op;
-	uint32_t pc = vcpu->arch.pc;
+	u32 rt, rd, sel;
 	unsigned long curr_pc;
 
 	/*
@@ -992,16 +1036,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 	if (er == EMULATE_FAIL)
 		return er;
 
-	copz = (inst >> 21) & 0x1f;
-	rt = (inst >> 16) & 0x1f;
-	rd = (inst >> 11) & 0x1f;
-	sel = inst & 0x7;
-	co_bit = (inst >> 25) & 1;
-
-	if (co_bit) {
-		op = (inst) & 0xff;
-
-		switch (op) {
+	if (inst.co_format.co) {
+		switch (inst.co_format.func) {
 		case tlbr_op:	/* Read indexed TLB entry */
 			er = kvm_mips_emul_tlbr(vcpu);
 			break;
@@ -1020,47 +1056,58 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 		case eret_op:
 			er = kvm_mips_emul_eret(vcpu);
 			goto dont_update_pc;
-			break;
 		case wait_op:
 			er = kvm_mips_emul_wait(vcpu);
 			break;
 		}
 	} else {
-		switch (copz) {
+		rt = inst.c0r_format.rt;
+		rd = inst.c0r_format.rd;
+		sel = inst.c0r_format.sel;
+
+		switch (inst.c0r_format.rs) {
 		case mfc_op:
 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 			cop0->stat[rd][sel]++;
#endif
 			/* Get reg */
 			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
-				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+				vcpu->arch.gprs[rt] =
+					(s32)kvm_mips_read_count(vcpu);
 			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
 				vcpu->arch.gprs[rt] = 0x0;
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 				kvm_mips_trans_mfc0(inst, opc, vcpu);
 #endif
 			} else {
-				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 				kvm_mips_trans_mfc0(inst, opc, vcpu);
 #endif
 			}
 
-			kvm_debug
-			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
-			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
-
+			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
+				      KVM_TRACE_COP0(rd, sel),
+				      vcpu->arch.gprs[rt]);
 			break;
 
 		case dmfc_op:
 			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
+				      KVM_TRACE_COP0(rd, sel),
+				      vcpu->arch.gprs[rt]);
 			break;
 
 		case mtc_op:
 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 			cop0->stat[rd][sel]++;
 #endif
+			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
+				      KVM_TRACE_COP0(rd, sel),
+				      vcpu->arch.gprs[rt]);
+
 			if ((rd == MIPS_CP0_TLB_INDEX)
 			    && (vcpu->arch.gprs[rt] >=
 				KVM_MIPS_GUEST_TLB_SIZE)) {
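The two hunks above replace open-coded shift/mask decoding with the bitfield views in union mips_instruction from asm/inst.h. A plain-C sketch of why the two styles are equivalent; the field order below is an assumption for a big-endian MIPS build (the authoritative layout is in arch/mips/include/uapi/asm/inst.h), so treat it as illustrative only:

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for the c0r_format bitfields, big-endian
 * layout assumed (first-declared field in the most significant bits). */
union insn {
	uint32_t word;
	struct {
		uint32_t opcode:6;
		uint32_t rs:5;
		uint32_t rt:5;
		uint32_t rd:5;
		uint32_t zero:8;
		uint32_t sel:3;
	} c0r;
};

static void check_equivalence(uint32_t word)
{
	union insn i = { .word = word };

	/* The bitfield reads replace the old open-coded shift/mask. */
	assert(i.c0r.rs == ((word >> 21) & 0x1f));
	assert(i.c0r.rt == ((word >> 16) & 0x1f));
	assert(i.c0r.rd == ((word >> 11) & 0x1f));
	assert(i.c0r.sel == (word & 0x7));
}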
@@ -1078,16 +1125,15 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
 					kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-				uint32_t nasid =
+				u32 nasid =
 					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
 				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
 				    ((kvm_read_c0_guest_entryhi(cop0) &
 				      KVM_ENTRYHI_ASID) != nasid)) {
-					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+					trace_kvm_asid_change(vcpu,
 						kvm_read_c0_guest_entryhi(cop0)
-						& KVM_ENTRYHI_ASID,
-						vcpu->arch.gprs[rt]
-						& KVM_ENTRYHI_ASID);
+							& KVM_ENTRYHI_ASID,
+						nasid);
 
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
@@ -1100,10 +1146,6 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
 				goto done;
 			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
-				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
-					  pc, kvm_read_c0_guest_compare(cop0),
-					  vcpu->arch.gprs[rt]);
-
 				/* If we are writing to COMPARE */
 				/* Clear pending timer interrupt, if any */
 				kvm_mips_write_compare(vcpu,
@@ -1155,7 +1197,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				 * it first.
 				 */
 				if (change & ST0_CU1 && !(val & ST0_FR) &&
-				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
 					kvm_lose_fpu(vcpu);
 
 				/*
@@ -1166,7 +1208,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				 * the near future.
 				 */
 				if (change & ST0_CU1 &&
-				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
 					change_c0_status(ST0_CU1, val);
 
 				preempt_enable();
@@ -1201,7 +1243,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				 * context is already loaded.
 				 */
 				if (change & MIPS_CONF5_FRE &&
-				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
 					change_c0_config5(MIPS_CONF5_FRE, val);
 
 				/*
@@ -1211,7 +1253,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				 * quickly enabled again in the near future.
 				 */
 				if (change & MIPS_CONF5_MSAEN &&
-				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
 					change_c0_config5(MIPS_CONF5_MSAEN,
 							  val);
 
@@ -1219,7 +1261,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 
 				kvm_write_c0_guest_config5(cop0, val);
 			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
-				uint32_t old_cause, new_cause;
+				u32 old_cause, new_cause;
 
 				old_cause = kvm_read_c0_guest_cause(cop0);
 				new_cause = vcpu->arch.gprs[rt];
@@ -1233,20 +1275,30 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 					else
 						kvm_mips_count_enable_cause(vcpu);
 				}
+			} else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
+				u32 mask = MIPS_HWRENA_CPUNUM |
+					   MIPS_HWRENA_SYNCISTEP |
+					   MIPS_HWRENA_CC |
+					   MIPS_HWRENA_CCRES;
+
+				if (kvm_read_c0_guest_config3(cop0) &
+				    MIPS_CONF3_ULRI)
+					mask |= MIPS_HWRENA_ULR;
+				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
 			} else {
 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 				kvm_mips_trans_mtc0(inst, opc, vcpu);
 #endif
 			}
-
-			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
-				  rd, sel, cop0->reg[rd][sel]);
 			break;
 
 		case dmtc_op:
 			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
 				vcpu->arch.pc, rt, rd, sel);
+			trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
+				      KVM_TRACE_COP0(rd, sel),
+				      vcpu->arch.gprs[rt]);
 			er = EMULATE_FAIL;
 			break;
 
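On the new MIPS_CP0_HWRENA case above: guest writes to HWREna are masked down to the hardware registers KVM can actually emulate through RDHWR, and the ULR bit is only writable when Config3.ULRI is advertised to the guest. A small sketch of that masking (plain C; the MIPS_HWRENA_* bit positions below are stated per the architecture, HWR 0..3 plus 29, but treat them as assumptions rather than values copied from mipsregs.h):

#include <stdint.h>

#define HWRENA_CPUNUM		(1u << 0)
#define HWRENA_SYNCISTEP	(1u << 1)
#define HWRENA_CC		(1u << 2)
#define HWRENA_CCRES		(1u << 3)
#define HWRENA_ULR		(1u << 29)

/* Mask applied to guest HWREna writes; ULR only if Config3.ULRI is set. */
static uint32_t hwrena_write_mask(int guest_has_ulri)
{
	uint32_t mask = HWRENA_CPUNUM | HWRENA_SYNCISTEP |
			HWRENA_CC | HWRENA_CCRES;

	if (guest_has_ulri)
		mask |= HWRENA_ULR;
	return mask;
}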
@@ -1258,7 +1310,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 			vcpu->arch.gprs[rt] =
 				kvm_read_c0_guest_status(cop0);
 			/* EI */
-			if (inst & 0x20) {
+			if (inst.mfmc0_format.sc) {
 				kvm_debug("[%#lx] mfmc0_op: EI\n",
 					  vcpu->arch.pc);
 				kvm_set_c0_guest_status(cop0, ST0_IE);
@@ -1272,9 +1324,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 
 		case wrpgpr_op:
 			{
-				uint32_t css =
-				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
-				uint32_t pss =
+				u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+				u32 pss =
 				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
 				/*
 				 * We don't support any shadow register sets, so
@@ -1291,7 +1342,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 			break;
 		default:
 			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-				vcpu->arch.pc, copz);
+				vcpu->arch.pc, inst.c0r_format.rs);
 			er = EMULATE_FAIL;
 			break;
 		}
@@ -1312,13 +1363,14 @@ dont_update_pc:
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+					     u32 cause,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
-	int32_t op, base, rt, offset;
-	uint32_t bytes;
+	u32 rt;
+	u32 bytes;
 	void *data = run->mmio.data;
 	unsigned long curr_pc;
 
@@ -1331,12 +1383,9 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 	if (er == EMULATE_FAIL)
 		return er;
 
-	rt = (inst >> 16) & 0x1f;
-	base = (inst >> 21) & 0x1f;
-	offset = inst & 0xffff;
-	op = (inst >> 26) & 0x3f;
+	rt = inst.i_format.rt;
 
-	switch (op) {
+	switch (inst.i_format.opcode) {
 	case sb_op:
 		bytes = 1;
 		if (bytes > sizeof(run->mmio.data)) {
@@ -1357,7 +1406,7 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 		*(u8 *) data = vcpu->arch.gprs[rt];
 		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
 			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
-			  *(uint8_t *) data);
+			  *(u8 *) data);
 
 		break;
 
@@ -1379,11 +1428,11 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 		run->mmio.is_write = 1;
 		vcpu->mmio_needed = 1;
 		vcpu->mmio_is_write = 1;
-		*(uint32_t *) data = vcpu->arch.gprs[rt];
+		*(u32 *) data = vcpu->arch.gprs[rt];
 
 		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
-			  vcpu->arch.gprs[rt], *(uint32_t *) data);
+			  vcpu->arch.gprs[rt], *(u32 *) data);
 		break;
 
 	case sh_op:
@@ -1404,15 +1453,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 		run->mmio.is_write = 1;
 		vcpu->mmio_needed = 1;
 		vcpu->mmio_is_write = 1;
-		*(uint16_t *) data = vcpu->arch.gprs[rt];
+		*(u16 *) data = vcpu->arch.gprs[rt];
 
 		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
-			  vcpu->arch.gprs[rt], *(uint32_t *) data);
+			  vcpu->arch.gprs[rt], *(u32 *) data);
 		break;
 
 	default:
-		kvm_err("Store not yet supported");
+		kvm_err("Store not yet supported (inst=0x%08x)\n",
+			inst.word);
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1424,18 +1474,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
-					    struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+					    u32 cause, struct kvm_run *run,
 					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
-	int32_t op, base, rt, offset;
-	uint32_t bytes;
+	u32 op, rt;
+	u32 bytes;
 
-	rt = (inst >> 16) & 0x1f;
-	base = (inst >> 21) & 0x1f;
-	offset = inst & 0xffff;
-	op = (inst >> 26) & 0x3f;
+	rt = inst.i_format.rt;
+	op = inst.i_format.opcode;
 
 	vcpu->arch.pending_load_cause = cause;
 	vcpu->arch.io_gpr = rt;
@@ -1521,7 +1569,8 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
 		break;
 
 	default:
-		kvm_err("Load not yet supported");
+		kvm_err("Load not yet supported (inst=0x%08x)\n",
+			inst.word);
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1529,40 +1578,15 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
 	return er;
 }
 
-int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
-{
-	unsigned long offset = (va & ~PAGE_MASK);
-	struct kvm *kvm = vcpu->kvm;
-	unsigned long pa;
-	gfn_t gfn;
-	kvm_pfn_t pfn;
-
-	gfn = va >> PAGE_SHIFT;
-
-	if (gfn >= kvm->arch.guest_pmap_npages) {
-		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
-		kvm_mips_dump_host_tlbs();
-		kvm_arch_vcpu_dump_regs(vcpu);
-		return -1;
-	}
-	pfn = kvm->arch.guest_pmap[gfn];
-	pa = (pfn << PAGE_SHIFT) | offset;
-
-	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
-		  CKSEG0ADDR(pa));
-
-	local_flush_icache_range(CKSEG0ADDR(pa), 32);
-	return 0;
-}
-
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
-					     uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+					     u32 *opc, u32 cause,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
-	int32_t offset, cache, op_inst, op, base;
+	u32 cache, op_inst, op, base;
+	s16 offset;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long va;
 	unsigned long curr_pc;
@@ -1576,9 +1600,12 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 	if (er == EMULATE_FAIL)
 		return er;
 
-	base = (inst >> 21) & 0x1f;
-	op_inst = (inst >> 16) & 0x1f;
-	offset = (int16_t)inst;
+	base = inst.i_format.rs;
+	op_inst = inst.i_format.rt;
+	if (cpu_has_mips_r6)
+		offset = inst.spec3_format.simmediate;
+	else
+		offset = inst.i_format.simmediate;
 	cache = op_inst & CacheOp_Cache;
 	op = op_inst & CacheOp_Op;
 
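The cpu_has_mips_r6 split above exists because R6 moved CACHE under the SPEC3 opcode with a 9-bit signed offset instead of the classic 16-bit immediate, which is also why offset became s16. A sketch of the sign extension the spec3_format.simmediate bitfield performs (plain C; the bit positions, offset in bits 15:7, follow the R6 manual but are assumptions here):

#include <assert.h>
#include <stdint.h>

/* Extract and sign-extend the 9-bit R6 CACHE offset field. */
static int16_t r6_cache_offset(uint32_t word)
{
	int32_t off = (word >> 7) & 0x1ff;	/* 9-bit field */

	if (off & 0x100)			/* sign bit of the field */
		off -= 0x200;
	return (int16_t)off;
}

int main(void)
{
	assert(r6_cache_offset(0x0ff << 7) == 255);
	assert(r6_cache_offset(0x100 << 7) == -256);
	return 0;
}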
@@ -1634,7 +1661,6 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 						  (cop0) & KVM_ENTRYHI_ASID));
 
 		if (index < 0) {
-			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
 			vcpu->arch.host_cp0_badvaddr = va;
 			vcpu->arch.pc = curr_pc;
 			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
@@ -1659,9 +1685,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 				 * We fault an entry from the guest tlb to the
 				 * shadow host TLB
 				 */
-				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-								     NULL,
-								     NULL);
+				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
 			}
 		}
 	} else {
@@ -1714,20 +1738,20 @@ dont_update_pc:
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
 					    struct kvm_run *run,
 					    struct kvm_vcpu *vcpu)
 {
+	union mips_instruction inst;
 	enum emulation_result er = EMULATE_DONE;
-	uint32_t inst;
 
 	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
 
-	inst = kvm_get_inst(opc, vcpu);
+	inst.word = kvm_get_inst(opc, vcpu);
 
-	switch (((union mips_instruction)inst).r_format.opcode) {
+	switch (inst.r_format.opcode) {
 	case cop0_op:
 		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
 		break;
@@ -1744,15 +1768,31 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
 		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
 		break;
 
+#ifndef CONFIG_CPU_MIPSR6
 	case cache_op:
 		++vcpu->stat.cache_exits;
-		trace_kvm_exit(vcpu, CACHE_EXITS);
+		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
 		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
 		break;
+#else
+	case spec3_op:
+		switch (inst.spec3_format.func) {
+		case cache6_op:
+			++vcpu->stat.cache_exits;
+			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+			er = kvm_mips_emulate_cache(inst, opc, cause, run,
+						    vcpu);
+			break;
+		default:
+			goto unknown;
+		};
+		break;
+unknown:
+#endif
 
 	default:
 		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
-			inst);
+			inst.word);
 		kvm_arch_vcpu_dump_regs(vcpu);
 		er = EMULATE_FAIL;
 		break;
@@ -1761,8 +1801,8 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
-					       uint32_t *opc,
+enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+					       u32 *opc,
 					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
@@ -1796,8 +1836,8 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
-						  uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+						  u32 *opc,
 						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
@@ -1842,8 +1882,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
 	return EMULATE_DONE;
 }
 
-enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
-						 uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+						 u32 *opc,
 						 struct kvm_run *run,
 						 struct kvm_vcpu *vcpu)
 {
@@ -1888,8 +1928,8 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
 	return EMULATE_DONE;
 }
 
-enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
-						  uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+						  u32 *opc,
 						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
@@ -1932,8 +1972,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
 	return EMULATE_DONE;
 }
 
-enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
-						 uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+						 u32 *opc,
 						 struct kvm_run *run,
 						 struct kvm_vcpu *vcpu)
 {
@@ -1977,7 +2017,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
 }
 
 /* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu)
 {
@@ -2005,8 +2045,8 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
-					      uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+					      u32 *opc,
 					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
@@ -2048,8 +2088,8 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
 	return EMULATE_DONE;
 }
 
-enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
-					       uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+					       u32 *opc,
 					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
@@ -2077,8 +2117,8 @@ enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
 	return EMULATE_DONE;
 }
 
-enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
-					      uint32_t *opc,
+enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+					      u32 *opc,
 					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
@@ -2112,8 +2152,8 @@ enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
-					      uint32_t *opc,
+enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+					      u32 *opc,
 					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
@@ -2147,8 +2187,8 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
-						uint32_t *opc,
+enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+						u32 *opc,
 						struct kvm_run *run,
 						struct kvm_vcpu *vcpu)
 {
@@ -2182,8 +2222,8 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
-						  uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+						  u32 *opc,
 						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
@@ -2217,8 +2257,8 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
-					       uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+					       u32 *opc,
 					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
@@ -2252,8 +2292,8 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
-						  uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+						  u32 *opc,
 						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
@@ -2287,22 +2327,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
 	return er;
 }
 
-/* ll/sc, rdhwr, sync emulation */
-
-#define OPCODE 0xfc000000
-#define BASE   0x03e00000
-#define RT     0x001f0000
-#define OFFSET 0x0000ffff
-#define LL     0xc0000000
-#define SC     0xe0000000
-#define SPEC0  0x00000000
-#define SPEC3  0x7c000000
-#define RD     0x0000f800
-#define FUNC   0x0000003f
-#define SYNC   0x0000000f
-#define RDHWR  0x0000003b
-
-enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
 					 struct kvm_run *run,
 					 struct kvm_vcpu *vcpu)
 {
@@ -2310,7 +2335,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long curr_pc;
-	uint32_t inst;
+	union mips_instruction inst;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -2325,17 +2350,22 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 	if (cause & CAUSEF_BD)
 		opc += 1;
 
-	inst = kvm_get_inst(opc, vcpu);
+	inst.word = kvm_get_inst(opc, vcpu);
 
-	if (inst == KVM_INVALID_INST) {
+	if (inst.word == KVM_INVALID_INST) {
 		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
 		return EMULATE_FAIL;
 	}
 
-	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+	if (inst.r_format.opcode == spec3_op &&
+	    inst.r_format.func == rdhwr_op &&
+	    inst.r_format.rs == 0 &&
+	    (inst.r_format.re >> 3) == 0) {
 		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-		int rd = (inst & RD) >> 11;
-		int rt = (inst & RT) >> 16;
+		int rd = inst.r_format.rd;
+		int rt = inst.r_format.rt;
+		int sel = inst.r_format.re & 0x7;
+
 		/* If usermode, check RDHWR rd is allowed by guest HWREna */
 		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
 			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
@@ -2343,17 +2373,17 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 			goto emulate_ri;
 		}
 		switch (rd) {
-		case 0:	/* CPU number */
-			arch->gprs[rt] = 0;
+		case MIPS_HWR_CPUNUM:		/* CPU number */
+			arch->gprs[rt] = vcpu->vcpu_id;
 			break;
-		case 1:	/* SYNCI length */
+		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
 			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
 					     current_cpu_data.icache.linesz);
 			break;
-		case 2:	/* Read count register */
-			arch->gprs[rt] = kvm_mips_read_count(vcpu);
+		case MIPS_HWR_CC:		/* Read count register */
+			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
 			break;
-		case 3:	/* Count register resolution */
+		case MIPS_HWR_CCRES:		/* Count register resolution */
 			switch (current_cpu_data.cputype) {
 			case CPU_20KC:
 			case CPU_25KF:
@@ -2363,7 +2393,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 				arch->gprs[rt] = 2;
 			}
 			break;
-		case 29:
+		case MIPS_HWR_ULR:		/* Read UserLocal register */
 			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
 			break;
 
@@ -2371,8 +2401,12 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
 			goto emulate_ri;
 		}
+
+		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
+			      vcpu->arch.gprs[rt]);
 	} else {
-		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+		kvm_debug("Emulate RI not supported @ %p: %#x\n",
+			  opc, inst.word);
 		goto emulate_ri;
 	}
 
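The stricter RDHWR match above also requires rs == 0 and the top bits of the re (sa) field clear, with re<2:0> reused as the R6 sel. A self-contained decode check against the canonical TLS read "rdhwr $3, $29", whose 0x7c03e83b encoding is well known (plain C, shift/mask spelled out rather than the kernel's bitfields):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t word = 0x7c03e83b;		/* rdhwr $3, $29 */
	uint32_t opcode = word >> 26;
	uint32_t rs = (word >> 21) & 0x1f;
	uint32_t rt = (word >> 16) & 0x1f;
	uint32_t rd = (word >> 11) & 0x1f;
	uint32_t re = (word >> 6) & 0x1f;
	uint32_t func = word & 0x3f;

	assert(opcode == 0x1f && func == 0x3b);	/* spec3_op / rdhwr_op */
	assert(rs == 0 && (re >> 3) == 0);	/* required by the match */
	assert(rt == 3 && rd == 29);		/* $3 <- HWR 29 (UserLocal) */
	return 0;
}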
@@ -2405,19 +2439,19 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 
 	switch (run->mmio.len) {
 	case 4:
-		*gpr = *(int32_t *) run->mmio.data;
+		*gpr = *(s32 *) run->mmio.data;
 		break;
 
 	case 2:
 		if (vcpu->mmio_needed == 2)
-			*gpr = *(int16_t *) run->mmio.data;
+			*gpr = *(s16 *) run->mmio.data;
 		else
-			*gpr = *(uint16_t *)run->mmio.data;
+			*gpr = *(u16 *)run->mmio.data;
 
 		break;
 	case 1:
 		if (vcpu->mmio_needed == 2)
-			*gpr = *(int8_t *) run->mmio.data;
+			*gpr = *(s8 *) run->mmio.data;
 		else
 			*gpr = *(u8 *) run->mmio.data;
 		break;
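In the hunk above, vcpu->mmio_needed == 2 selects a signed load (LH/LB style sign extension into the GPR) while other values zero-extend (LHU/LBU); the s16/u16 casts make that explicit. A plain-C model of the halfword case (not kernel API, values illustrative):

#include <assert.h>
#include <stdint.h>

static int64_t complete_halfword_load(const void *data, int mmio_needed)
{
	if (mmio_needed == 2)
		return *(const int16_t *)data;	/* LH: sign-extend */
	return *(const uint16_t *)data;		/* LHU: zero-extend */
}

int main(void)
{
	uint16_t raw = 0x8000;

	assert(complete_halfword_load(&raw, 2) == -32768);
	assert(complete_halfword_load(&raw, 1) == 32768);
	return 0;
}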
@@ -2432,12 +2466,12 @@ done:
 	return er;
 }
 
-static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
-						  uint32_t *opc,
+static enum emulation_result kvm_mips_emulate_exc(u32 cause,
+						  u32 *opc,
 						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
-	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	enum emulation_result er = EMULATE_DONE;
@@ -2470,13 +2504,13 @@ static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
 	return er;
 }
 
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
-					       uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+					       u32 *opc,
 					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
-	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 
 	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
@@ -2566,18 +2600,18 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
-enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
-					      uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+					      u32 *opc,
 					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
-	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 	unsigned long va = vcpu->arch.host_cp0_badvaddr;
 	int index;
 
-	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
-		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
+		  vcpu->arch.host_cp0_badvaddr);
 
 	/*
 	 * KVM would not have got the exception if this entry was valid in the
@@ -2620,13 +2654,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
 		}
 	} else {
 		kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
-			  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+			  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
 		/*
 		 * OK we have a Guest TLB entry, now inject it into the
 		 * shadow host TLB
 		 */
-		kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
-						     NULL);
+		kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
 	}
 }
 
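On the tlb_lo0/tlb_lo1 to tlb_lo[2] conversion seen in several hunks: each MIPS TLB entry maps an even/odd page pair, so turning the two EntryLo fields into an array lets the one covering a faulting address be selected by the parity of its virtual page number instead of branching between two differently named members. A minimal sketch of that indexing (types simplified from the kernel's struct kvm_mips_tlb; illustrative only):

#include <stdint.h>

struct tlb_entry {
	uint64_t tlb_hi;
	uint64_t tlb_lo[2];	/* [0] = even page, [1] = odd page */
};

/* Pick the EntryLo half covering badvaddr by the VPN's low bit. */
static uint64_t entrylo_for(const struct tlb_entry *tlb,
			    uint64_t badvaddr, unsigned int page_shift)
{
	return tlb->tlb_lo[(badvaddr >> page_shift) & 1];
}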