@@ -1416,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
/* This loads and recheckpoints the FP registers from
* thread.fpr[]. They will remain in registers after the
* checkpoint so we don't need to reload them after.
+ * If VMX is in use, the VRs now hold checkpointed values,
+ * so we don't want to load the VRs from the thread_struct.
*/
- tm_recheckpoint(&current->thread, regs->msr);
+ tm_recheckpoint(&current->thread, MSR_FP);
+
+ /* If VMX is in use, get the transactional values back */
+ if (regs->msr & MSR_VEC) {
+ do_load_up_transact_altivec(&current->thread);
+ /* At this point all the VSX state is loaded, so enable it */
+ regs->msr |= MSR_VSX;
+ }
}

-#ifdef CONFIG_ALTIVEC
void altivec_unavailable_tm(struct pt_regs *regs)
{
/* See the comments in fp_unavailable_tm(). This function operates
@@ -1432,14 +1440,19 @@ void altivec_unavailable_tm(struct pt_regs *regs)
regs->nip, regs->msr);
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC;
- tm_recheckpoint(&current->thread, regs->msr);
+ tm_recheckpoint(&current->thread, MSR_VEC);
current->thread.used_vr = 1;
+
+ if (regs->msr & MSR_FP) {
+ do_load_up_transact_fpu(&current->thread);
+ regs->msr |= MSR_VSX;
+ }
}
-#endif

-#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
+ unsigned long orig_msr = regs->msr;
+
/* See the comments in fp_unavailable_tm(). This works similarly,
* though we're loading both FP and VEC registers in here.
*
@@ -1451,16 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
"MSR=%lx\n",
regs->nip, regs->msr);

+ current->thread.used_vsr = 1;
+
+ /* If FP and VMX are already loaded, we have all the state we need */
+ if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
+ regs->msr |= MSR_VSX;
+ return;
+ }
+
/* This reclaims FP and/or VR regs if they're already enabled */
tm_reclaim_current(TM_CAUSE_FAC_UNAV);

regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
MSR_VSX;
- /* This loads & recheckpoints FP and VRs. */
- tm_recheckpoint(&current->thread, regs->msr);
- current->thread.used_vsr = 1;
+
+ /* This loads & recheckpoints FP and VRs; but we have
+ * to be sure not to overwrite previously-valid state.
+ */
+ tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+
+ if (orig_msr & MSR_FP)
+ do_load_up_transact_fpu(&current->thread);
+ if (orig_msr & MSR_VEC)
+ do_load_up_transact_altivec(&current->thread);
}
-#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
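
To see what the `regs->msr & ~orig_msr` mask in vsx_unavailable_tm() actually selects, here is a small user-space sketch of the same bit arithmetic. The MSR_FP/MSR_VEC/MSR_VSX bit positions match the kernel's definitions in arch/powerpc/include/asm/reg.h, but the harness around them is purely illustrative; none of it is kernel API.

```c
/* Illustrative only: mirrors the facility-mask computation in
 * vsx_unavailable_tm() above.  The MSR bit positions are the real
 * 64-bit PowerPC ones; the rest is a stand-in for the kernel code. */
#include <stdio.h>

#define MSR_FP  (1UL << 13)     /* floating point available */
#define MSR_VSX (1UL << 23)     /* VSX available */
#define MSR_VEC (1UL << 25)     /* VMX/Altivec available */

int main(void)
{
        /* Suppose the transaction was using FP but not VMX when the
         * VSX-unavailable exception hit. */
        unsigned long orig_msr = MSR_FP;

        /* Mirrors: regs->msr |= MSR_VEC | MSR_FP | MSR_VSX; */
        unsigned long new_msr = orig_msr | MSR_VEC | MSR_FP | MSR_VSX;

        /* Only facilities that were *not* live get their checkpointed
         * state loaded from the thread_struct by tm_recheckpoint();
         * facilities in orig_msr already hold checkpointed values in
         * registers after tm_reclaim, so they must not be overwritten. */
        unsigned long reload = new_msr & ~orig_msr;

        printf("reload FP from thread_struct:  %s\n",
               (reload & MSR_FP) ? "yes" : "no");   /* no  */
        printf("reload VEC from thread_struct: %s\n",
               (reload & MSR_VEC) ? "yes" : "no");  /* yes */
        return 0;
}
```

With orig_msr = MSR_FP the mask excludes FP and selects VEC, which is why the patch then calls do_load_up_transact_fpu() only for the facilities in orig_msr: those are the ones whose transactional values still need to be brought back after the recheckpoint.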