@@ -187,9 +187,22 @@ void enable_kernel_fp(void)
 	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
+
+static int restore_fp(struct task_struct *tsk) {
+	if (tsk->thread.load_fp) {
+		load_fp_state(&current->thread.fp_state);
+		current->thread.load_fp++;
+		return 1;
+	}
+	return 0;
+}
+#else
+static int restore_fp(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
+#define loadvec(thr) ((thr).load_vec)
+
 void giveup_altivec(struct task_struct *tsk)
 {
 	check_if_tm_restore_required(tsk);
@@ -229,6 +242,21 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 	}
 }
 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+
+static int restore_altivec(struct task_struct *tsk)
+{
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
+		load_vr_state(&tsk->thread.vr_state);
+		tsk->thread.used_vr = 1;
+		tsk->thread.load_vec++;
+
+		return 1;
+	}
+	return 0;
+}
+#else
+#define loadvec(thr) 0
+static inline int restore_altivec(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -275,6 +303,18 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 	}
 }
 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
+
+static int restore_vsx(struct task_struct *tsk)
+{
+	if (cpu_has_feature(CPU_FTR_VSX)) {
+		tsk->thread.used_vsr = 1;
+		return 1;
+	}
+
+	return 0;
+}
+#else
+static inline int restore_vsx(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -374,6 +414,36 @@ void giveup_all(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(giveup_all);
 
+void restore_math(struct pt_regs *regs)
+{
+	unsigned long msr;
+
+	if (!current->thread.load_fp && !loadvec(current->thread))
+		return;
+
+	msr = regs->msr;
+	msr_check_and_set(msr_all_available);
+
+	/*
+	 * Only reload if the bit is not set in the user MSR; the bit being
+	 * set indicates that the registers are hot.
+	 */
+	if ((!(msr & MSR_FP)) && restore_fp(current))
+		msr |= MSR_FP | current->thread.fpexc_mode;
+
+	if ((!(msr & MSR_VEC)) && restore_altivec(current))
+		msr |= MSR_VEC;
+
+	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
+			restore_vsx(current)) {
+		msr |= MSR_VSX;
+	}
+
+	msr_check_and_clear(msr_all_available);
+
+	regs->msr = msr;
+}
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
@@ -832,17 +902,9 @@ void restore_tm_state(struct pt_regs *regs)
 
 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
-	if (msr_diff & MSR_FP) {
-		msr_check_and_set(MSR_FP);
-		load_fp_state(&current->thread.fp_state);
-		msr_check_and_clear(MSR_FP);
-		regs->msr |= current->thread.fpexc_mode;
-	}
-	if (msr_diff & MSR_VEC) {
-		msr_check_and_set(MSR_VEC);
-		load_vr_state(&current->thread.vr_state);
-		msr_check_and_clear(MSR_VEC);
-	}
+
+	restore_math(regs);
+
 	regs->msr |= msr_diff;
 }
 
@@ -1006,6 +1068,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
 		batch = this_cpu_ptr(&ppc64_tlb_batch);
 		batch->active = 1;
 	}
+
+	if (current_thread_info()->task->thread.regs)
+		restore_math(current_thread_info()->task->thread.regs);
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	return last;