|
@@ -37,6 +37,7 @@
|
|
|
#include <linux/kdebug.h>
|
|
|
#include <linux/ratelimit.h>
|
|
|
#include <linux/context_tracking.h>
|
|
|
+#include <linux/smp.h>
|
|
|
|
|
|
#include <asm/emulated_ops.h>
|
|
|
#include <asm/pgtable.h>
|
|
@@ -699,6 +700,187 @@ void SMIException(struct pt_regs *regs)
|
|
|
die("System Management Interrupt", regs, SIGABRT);
|
|
|
}
|
|
|
|
|
|
+#ifdef CONFIG_VSX
|
|
|
+static void p9_hmi_special_emu(struct pt_regs *regs)
|
|
|
+{
|
|
|
+ unsigned int ra, rb, t, i, sel, instr, rc;
|
|
|
+ const void __user *addr;
|
|
|
+ u8 vbuf[16], *vdst;
|
|
|
+ unsigned long ea, msr, msr_mask;
|
|
|
+ bool swap;
|
|
|
+
|
|
|
+ if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
|
|
|
+ return;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * lxvb16x opcode: 0x7c0006d8
|
|
|
+ * lxvd2x opcode: 0x7c000698
|
|
|
+ * lxvh8x opcode: 0x7c000658
|
|
|
+ * lxvw4x opcode: 0x7c000618
|
|
|
+ */
|
|
|
+ if ((instr & 0xfc00073e) != 0x7c000618) {
|
|
|
+ pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
|
|
|
+ " instr=%08x\n",
|
|
|
+ smp_processor_id(), current->comm, current->pid,
|
|
|
+ regs->nip, instr);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Grab vector registers into the task struct */
|
|
|
+ msr = regs->msr; /* Grab msr before we flush the bits */
|
|
|
+ flush_vsx_to_thread(current);
|
|
|
+ enable_kernel_altivec();
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Is userspace running with a different endian (this is rare but
|
|
|
+ * not impossible)
|
|
|
+ */
|
|
|
+ swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
|
|
|
+
|
|
|
+ /* Decode the instruction */
|
|
|
+ ra = (instr >> 16) & 0x1f;
|
|
|
+ rb = (instr >> 11) & 0x1f;
|
|
|
+ t = (instr >> 21) & 0x1f;
|
|
|
+ if (instr & 1)
|
|
|
+ vdst = (u8 *)&current->thread.vr_state.vr[t];
|
|
|
+ else
|
|
|
+ vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
|
|
|
+
|
|
|
+ /* Grab the vector address */
|
|
|
+ ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
|
|
|
+ if (is_32bit_task())
|
|
|
+ ea &= 0xfffffffful;
|
|
|
+ addr = (__force const void __user *)ea;
|
|
|
+
|
|
|
+ /* Check it */
|
|
|
+ if (!access_ok(VERIFY_READ, addr, 16)) {
|
|
|
+ pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
|
|
|
+ " instr=%08x addr=%016lx\n",
|
|
|
+ smp_processor_id(), current->comm, current->pid,
|
|
|
+ regs->nip, instr, (unsigned long)addr);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Read the vector */
|
|
|
+ rc = 0;
|
|
|
+ if ((unsigned long)addr & 0xfUL)
|
|
|
+ /* unaligned case */
|
|
|
+ rc = __copy_from_user_inatomic(vbuf, addr, 16);
|
|
|
+ else
|
|
|
+ __get_user_atomic_128_aligned(vbuf, addr, rc);
|
|
|
+ if (rc) {
|
|
|
+ pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
|
|
|
+ " instr=%08x addr=%016lx\n",
|
|
|
+ smp_processor_id(), current->comm, current->pid,
|
|
|
+ regs->nip, instr, (unsigned long)addr);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
|
|
|
+ " instr=%08x addr=%016lx\n",
|
|
|
+ smp_processor_id(), current->comm, current->pid, regs->nip,
|
|
|
+ instr, (unsigned long) addr);
|
|
|
+
|
|
|
+ /* Grab instruction "selector" */
|
|
|
+ sel = (instr >> 6) & 3;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Check to make sure the facility is actually enabled. This
|
|
|
+ * could happen if we get a false positive hit.
|
|
|
+ *
|
|
|
+ * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
|
|
|
+ * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
|
|
|
+ */
|
|
|
+ msr_mask = MSR_VSX;
|
|
|
+ if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
|
|
|
+ msr_mask = MSR_VEC;
|
|
|
+ if (!(msr & msr_mask)) {
|
|
|
+ pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
|
|
|
+ " instr=%08x msr:%016lx\n",
|
|
|
+ smp_processor_id(), current->comm, current->pid,
|
|
|
+ regs->nip, instr, msr);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Do logging here before we modify sel based on endian */
|
|
|
+ switch (sel) {
|
|
|
+ case 0: /* lxvw4x */
|
|
|
+ PPC_WARN_EMULATED(lxvw4x, regs);
|
|
|
+ break;
|
|
|
+ case 1: /* lxvh8x */
|
|
|
+ PPC_WARN_EMULATED(lxvh8x, regs);
|
|
|
+ break;
|
|
|
+ case 2: /* lxvd2x */
|
|
|
+ PPC_WARN_EMULATED(lxvd2x, regs);
|
|
|
+ break;
|
|
|
+ case 3: /* lxvb16x */
|
|
|
+ PPC_WARN_EMULATED(lxvb16x, regs);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+#ifdef __LITTLE_ENDIAN__
|
|
|
+ /*
|
|
|
+ * An LE kernel stores the vector in the task struct as an LE
|
|
|
+ * byte array (effectively swapping both the components and
|
|
|
+ * the content of the components). Those instructions expect
|
|
|
+ * the components to remain in ascending address order, so we
|
|
|
+ * swap them back.
|
|
|
+ *
|
|
|
+ * If we are running a BE user space, the expectation is that
|
|
|
+ * of a simple memcpy, so forcing the emulation to look like
|
|
|
+ * a lxvb16x should do the trick.
|
|
|
+ */
|
|
|
+ if (swap)
|
|
|
+ sel = 3;
|
|
|
+
|
|
|
+ switch (sel) {
|
|
|
+ case 0: /* lxvw4x */
|
|
|
+ for (i = 0; i < 4; i++)
|
|
|
+ ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
|
|
|
+ break;
|
|
|
+ case 1: /* lxvh8x */
|
|
|
+ for (i = 0; i < 8; i++)
|
|
|
+ ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
|
|
|
+ break;
|
|
|
+ case 2: /* lxvd2x */
|
|
|
+ for (i = 0; i < 2; i++)
|
|
|
+ ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
|
|
|
+ break;
|
|
|
+ case 3: /* lxvb16x */
|
|
|
+ for (i = 0; i < 16; i++)
|
|
|
+ vdst[i] = vbuf[15-i];
|
|
|
+ break;
|
|
|
+ }
|
|
|
+#else /* __LITTLE_ENDIAN__ */
|
|
|
+ /* On a big endian kernel, a BE userspace only needs a memcpy */
|
|
|
+ if (!swap)
|
|
|
+ sel = 3;
|
|
|
+
|
|
|
+ /* Otherwise, we need to swap the content of the components */
|
|
|
+ switch (sel) {
|
|
|
+ case 0: /* lxvw4x */
|
|
|
+ for (i = 0; i < 4; i++)
|
|
|
+ ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
|
|
|
+ break;
|
|
|
+ case 1: /* lxvh8x */
|
|
|
+ for (i = 0; i < 8; i++)
|
|
|
+ ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
|
|
|
+ break;
|
|
|
+ case 2: /* lxvd2x */
|
|
|
+ for (i = 0; i < 2; i++)
|
|
|
+ ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
|
|
|
+ break;
|
|
|
+ case 3: /* lxvb16x */
|
|
|
+ memcpy(vdst, vbuf, 16);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+#endif /* !__LITTLE_ENDIAN__ */
|
|
|
+
|
|
|
+ /* Go to next instruction */
|
|
|
+ regs->nip += 4;
|
|
|
+}
|
|
|
+#endif /* CONFIG_VSX */
|
|
|
+
|
|
|
void handle_hmi_exception(struct pt_regs *regs)
|
|
|
{
|
|
|
struct pt_regs *old_regs;
|
|
@@ -706,6 +888,21 @@ void handle_hmi_exception(struct pt_regs *regs)
|
|
|
old_regs = set_irq_regs(regs);
|
|
|
irq_enter();
|
|
|
|
|
|
+#ifdef CONFIG_VSX
|
|
|
+ /* Real mode flagged P9 special emu is needed */
|
|
|
+ if (local_paca->hmi_p9_special_emu) {
|
|
|
+ local_paca->hmi_p9_special_emu = 0;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We don't want to take page faults while doing the
|
|
|
+ * emulation, we just replay the instruction if necessary.
|
|
|
+ */
|
|
|
+ pagefault_disable();
|
|
|
+ p9_hmi_special_emu(regs);
|
|
|
+ pagefault_enable();
|
|
|
+ }
|
|
|
+#endif /* CONFIG_VSX */
|
|
|
+
|
|
|
if (ppc_md.handle_hmi_exception)
|
|
|
ppc_md.handle_hmi_exception(regs);
|
|
|
|
|
@@ -1924,6 +2121,10 @@ struct ppc_emulated ppc_emulated = {
|
|
|
WARN_EMULATED_SETUP(mfdscr),
|
|
|
WARN_EMULATED_SETUP(mtdscr),
|
|
|
WARN_EMULATED_SETUP(lq_stq),
|
|
|
+ WARN_EMULATED_SETUP(lxvw4x),
|
|
|
+ WARN_EMULATED_SETUP(lxvh8x),
|
|
|
+ WARN_EMULATED_SETUP(lxvd2x),
|
|
|
+ WARN_EMULATED_SETUP(lxvb16x),
|
|
|
#endif
|
|
|
};
|
|
|
|