|
@@ -1663,12 +1663,6 @@ out:
|
|
|
|
|
|
void fp_unavailable_tm(struct pt_regs *regs)
|
|
|
{
|
|
|
- /*
|
|
|
- * Save the MSR now because tm_reclaim_current() is likely to
|
|
|
- * change it
|
|
|
- */
|
|
|
- unsigned long orig_msr = regs->msr;
|
|
|
-
|
|
|
/* Note: This does not handle any kind of FP laziness. */
|
|
|
|
|
|
TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
|
|
@@ -1694,24 +1688,10 @@ void fp_unavailable_tm(struct pt_regs *regs)
|
|
|
* so we don't want to load the VRs from the thread_struct.
|
|
|
*/
|
|
|
tm_recheckpoint(&current->thread);
|
|
|
-
|
|
|
- /* If VMX is in use, get the transactional values back */
|
|
|
- if (orig_msr & MSR_VEC) {
|
|
|
- msr_check_and_set(MSR_VEC);
|
|
|
- load_vr_state(&current->thread.vr_state);
|
|
|
- /* At this point all the VSX state is loaded, so enable it */
|
|
|
- regs->msr |= MSR_VSX;
|
|
|
- }
|
|
|
}
|
|
|
|
|
|
void altivec_unavailable_tm(struct pt_regs *regs)
|
|
|
{
|
|
|
- /*
|
|
|
- * Save the MSR now because tm_reclaim_current() is likely to
|
|
|
- * change it
|
|
|
- */
|
|
|
- unsigned long orig_msr = regs->msr;
|
|
|
-
|
|
|
/* See the comments in fp_unavailable_tm(). This function operates
|
|
|
* the same way.
|
|
|
*/
|
|
@@ -1723,12 +1703,6 @@ void altivec_unavailable_tm(struct pt_regs *regs)
|
|
|
current->thread.load_vec = 1;
|
|
|
tm_recheckpoint(&current->thread);
|
|
|
current->thread.used_vr = 1;
|
|
|
-
|
|
|
- if (orig_msr & MSR_FP) {
|
|
|
- msr_check_and_set(MSR_FP);
|
|
|
- load_fp_state(&current->thread.fp_state);
|
|
|
- regs->msr |= MSR_VSX;
|
|
|
- }
|
|
|
}
|
|
|
|
|
|
void vsx_unavailable_tm(struct pt_regs *regs)
|
|
@@ -1753,10 +1727,6 @@ void vsx_unavailable_tm(struct pt_regs *regs)
|
|
|
current->thread.load_fp = 1;
|
|
|
|
|
|
tm_recheckpoint(&current->thread);
|
|
|
-
|
|
|
- msr_check_and_set(MSR_FP | MSR_VEC);
|
|
|
- load_fp_state(&current->thread.fp_state);
|
|
|
- load_vr_state(&current->thread.vr_state);
|
|
|
}
|
|
|
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
|
|
|