@@ -891,6 +891,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 #ifdef CONFIG_EVA
 	mm_segment_t seg;
 #endif
+	union fpureg *fpr;
+	enum msa_2b_fmt df;
+	unsigned int wd;
 	origpc = (unsigned long)pc;
 	orig31 = regs->regs[31];
 
@@ -1202,6 +1205,75 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			break;
 		return;
 
+	case msa_op:
+		if (!cpu_has_msa)
+			goto sigill;
+
+		/*
+		 * If we've reached this point then userland should have taken
+		 * the MSA disabled exception & initialised vector context at
+		 * some point in the past.
+		 */
+		BUG_ON(!thread_msa_context_live());
+
+		df = insn.msa_mi10_format.df;
+		wd = insn.msa_mi10_format.wd;
+		fpr = &current->thread.fpu.fpr[wd];
+
+		switch (insn.msa_mi10_format.func) {
+		case msa_ld_op:
+			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Disable preemption to avoid a race between copying
+			 * state from userland, migrating to another CPU and
+			 * updating the hardware vector register below.
+			 */
+			preempt_disable();
+
+			res = __copy_from_user_inatomic(fpr, addr,
+							sizeof(*fpr));
+			if (res)
+				goto fault;
+
+			/*
+			 * Update the hardware register if it is in use by the
+			 * task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			if (test_thread_flag(TIF_USEDMSA))
+				write_msa_wr(wd, fpr, df);
+
+			preempt_enable();
+			break;
+
+		case msa_st_op:
+			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Update from the hardware register if it is in use by
+			 * the task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			preempt_disable();
+			if (test_thread_flag(TIF_USEDMSA))
+				read_msa_wr(wd, fpr, df);
+			preempt_enable();
+
+			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+			if (res)
+				goto fault;
+			break;
+
+		default:
+			goto sigbus;
+		}
+
+		compute_return_epc(regs);
+		break;
+
 #ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to implementor for application specific use.