@@ -38,6 +38,7 @@
 #include <asm/vdso.h>
 #include <asm/dsp.h>
 #include <asm/inst.h>
+#include <asm/msa.h>
 
 #include "signal-common.h"
 
@@ -124,6 +125,168 @@ static int restore_hw_fp_context(void __user *sc)
 	return _restore_fp_context(fpregs, csr);
 }
 
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+	struct ucontext __user *uc;
+
+	/*
+	 * We can just pretend the sigcontext is always embedded in a struct
+	 * ucontext here, because the offset from sigcontext to extended
+	 * context is the same in the struct sigframe case.
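+	 *
+	 * Concretely, container_of() here just subtracts
+	 * offsetof(struct ucontext, uc_mcontext) from sc, so the result is
+	 * sc plus a fixed offset in either case.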
+	 */
+	uc = container_of(sc, struct ucontext, uc_mcontext);
+	return &uc->uc_extcontext;
+}
+
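+/*
+ * Save any live MSA vector context to buf. Returns the number of bytes
+ * written (zero when the task has no live MSA context), or -EFAULT if
+ * writing to user memory faults.
+ */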
+static int save_msa_extcontext(void __user *buf)
+{
+	struct msa_extcontext __user *msa = buf;
+	uint64_t val;
+	int i, err;
+
+	if (!thread_msa_context_live())
+		return 0;
+
+	/*
+	 * Ensure that we can't lose the live MSA context between checking
+	 * for it & writing it to memory.
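+	 *
+	 * Were we preempted in between, the live context could be saved to
+	 * kernel memory & MSA disabled for the task, invalidating the
+	 * is_msa_enabled() check below.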
+	 */
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be saved to kernel memory
+		 * and then copied to user memory. The save to kernel memory
+		 * should already have been done when handling scalar FP
+		 * context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		err = __put_user(read_msa_csr(), &msa->csr);
+		err |= _save_msa_all_upper(&msa->wr);
+
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+			err |= __put_user(val, &msa->wr[i]);
+		}
+	}
+
+	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+	err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+	return err ? -EFAULT : sizeof(*msa);
+}
+
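+/*
+ * Restore MSA vector context from a record of the given size. Records
+ * whose size does not match struct msa_extcontext are rejected with
+ * -EINVAL.
+ */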
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+	struct msa_extcontext __user *msa = buf;
+	unsigned long long val;
+	unsigned int csr;
+	int i, err;
+
+	if (size != sizeof(*msa))
+		return -EINVAL;
+
+	err = get_user(csr, &msa->csr);
+	if (err)
+		return err;
+
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be copied to kernel
+		 * memory and later loaded to registers. The same is true of
+		 * scalar FP context, so FPU & MSA should have already been
+		 * disabled whilst handling scalar FP context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		write_msa_csr(csr);
+		err |= _restore_msa_all_upper(&msa->wr);
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		current->thread.fpu.msacsr = csr;
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			err |= __get_user(val, &msa->wr[i]);
+			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+		}
+	}
+
+	return err;
+}
+
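+/*
+ * The extended context area following the sigcontext holds a sequence of
+ * records, each led by a { magic, size } struct extcontext header and
+ * terminated by a bare END_EXTCONTEXT_MAGIC word, e.g. with live MSA
+ * context:
+ *
+ *   [ struct msa_extcontext: header, MSACSR, upper 64 bits of each wr ]
+ *   [ u32 END_EXTCONTEXT_MAGIC ]
+ */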
+static int save_extcontext(void __user *buf)
+{
+	int sz;
+
+	sz = save_msa_extcontext(buf);
+	if (sz < 0)
+		return sz;
+	buf += sz;
+
+	/* If no context was saved then trivially return */
+	if (!sz)
+		return 0;
+
+	/* Write the end marker */
+	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+		return -EFAULT;
+
+	sz += sizeof(((struct extcontext *)NULL)->magic);
+	return sz;
+}
+
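+/*
+ * Walk the extended context records following the sigcontext, restoring
+ * each recognised record until the END_EXTCONTEXT_MAGIC terminator is
+ * reached. A record with an unrecognised magic value fails with -EINVAL.
+ */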
+static int restore_extcontext(void __user *buf)
+{
+	struct extcontext ext;
+	int err;
+
+	while (1) {
+		err = __get_user(ext.magic, (unsigned int *)buf);
+		if (err)
+			return err;
+
+		if (ext.magic == END_EXTCONTEXT_MAGIC)
+			return 0;
+
+		err = __get_user(ext.size, (unsigned int *)(buf
+			+ offsetof(struct extcontext, size)));
+		if (err)
+			return err;
+
+		switch (ext.magic) {
+		case MSA_EXTCONTEXT_MAGIC:
+			err = restore_msa_extcontext(buf, ext.size);
+			break;
+
+		default:
+			err = -EINVAL;
+			break;
+		}
+
+		if (err)
+			return err;
+
+		buf += ext.size;
+	}
+}
+
 /*
  * Helper routines
  */
@@ -133,20 +296,17 @@ int protected_save_fp_context(void __user *sc)
 	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
 	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	uint32_t __user *used_math = sc + abi->off_sc_used_math;
-	unsigned int used;
+	unsigned int used, ext_sz;
 	int err;
 
 	used = used_math() ? USED_FP : 0;
-	if (used) {
-		if (!test_thread_flag(TIF_32BIT_FPREGS))
-			used |= USED_FR1;
-		if (test_thread_flag(TIF_HYBRID_FPREGS))
-			used |= USED_HYBRID_FPRS;
-	}
+	if (!used)
+		goto fp_done;
 
-	err = __put_user(used, used_math);
-	if (err || !(used & USED_FP))
-		return err;
+	if (!test_thread_flag(TIF_32BIT_FPREGS))
+		used |= USED_FR1;
+	if (test_thread_flag(TIF_HYBRID_FPREGS))
+		used |= USED_HYBRID_FPRS;
 
 	/*
 	 * EVA does not have userland equivalents of ldc1 or sdc1, so
@@ -171,10 +331,16 @@ int protected_save_fp_context(void __user *sc)
 			__put_user(0, &fpregs[31]) |
 			__put_user(0, csr);
 		if (err)
-			break;	/* really bad sigcontext */
+			return err;	/* really bad sigcontext */
 	}
 
-	return err;
+fp_done:
+	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+	if (err < 0)
+		return err;
+	used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+	return __put_user(used, used_math);
 }
 
 int protected_restore_fp_context(void __user *sc)
@@ -184,7 +350,7 @@ int protected_restore_fp_context(void __user *sc)
 	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	uint32_t __user *used_math = sc + abi->off_sc_used_math;
 	unsigned int used;
-	int err, sig, tmp __maybe_unused;
+	int err, sig = 0, tmp __maybe_unused;
 
 	err = __get_user(used, used_math);
 	conditional_used_math(used & USED_FP);
@@ -193,10 +359,12 @@ int protected_restore_fp_context(void __user *sc)
 	 * The signal handler may have used FPU; give it up if the program
 	 * doesn't want it following sigreturn.
 	 */
-	if (err || !(used & USED_FP)) {
+	if (err || !(used & USED_FP))
 		lose_fpu(0);
+	if (err)
 		return err;
-	}
+	if (!(used & USED_FP))
+		goto fp_done;
 
 	err = sig = fpcsr_pending(csr);
 	if (err < 0)
@@ -229,6 +397,10 @@ int protected_restore_fp_context(void __user *sc)
 			break;	/* really bad sigcontext */
 	}
 
+fp_done:
+	if (used & USED_EXTCONTEXT)
+		err |= restore_extcontext(sc_to_extcontext(sc));
+
 	return err ?: sig;
 }
 
@@ -268,6 +440,28 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	return err;
 }
 
+static size_t extcontext_max_size(void)
+{
+	size_t sz = 0;
+
+	/*
+	 * The assumption here is that between this point & the point at which
+	 * the extended context is saved the size of the context should only
+	 * ever be able to shrink (if the task is preempted), but never grow.
+	 * That is, what this function returns is an upper bound on the size of
+	 * the extended context for the current task at the current time.
+	 */
+
+	if (thread_msa_context_live())
+		sz += sizeof(struct msa_extcontext);
+
+	/* If any context is saved then we'll append the end marker */
+	if (sz)
+		sz += sizeof(((struct extcontext *)NULL)->magic);
+
+	return sz;
+}
+
 int fpcsr_pending(unsigned int __user *fpcsr)
 {
 	int err, sig = 0;
@@ -324,6 +518,9 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 {
 	unsigned long sp;
 
+	/* Leave space for potential extended context */
+	frame_size += extcontext_max_size();
+
 	/* Default to using normal stack */
 	sp = regs->regs[29];
 