@@ -67,15 +67,8 @@

 extern unsigned long _get_SP(void);

-#ifndef CONFIG_SMP
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_vsx = NULL;
-struct task_struct *last_task_used_spe = NULL;
-#endif
-
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+static void check_if_tm_restore_required(struct task_struct *tsk)
 {
 	/*
 	 * If we are saving the current thread's registers, and the
@@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
 		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
+}
+#else
+static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+bool strict_msr_control;
+EXPORT_SYMBOL(strict_msr_control);
+
+static int __init enable_strict_msr_control(char *str)
+{
+	strict_msr_control = true;
+	pr_info("Enabling strict facility control\n");

-	giveup_fpu(tsk);
+	return 0;
 }
+early_param("ppc_strict_facility_enable", enable_strict_msr_control);

-void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+void msr_check_and_set(unsigned long bits)
 {
-	/*
-	 * If we are saving the current thread's registers, and the
-	 * thread is in a transactional state, set the TIF_RESTORE_TM
-	 * bit so that we know to restore the registers before
-	 * returning to userspace.
-	 */
-	if (tsk == current && tsk->thread.regs &&
-	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
-		set_thread_flag(TIF_RESTORE_TM);
-	}
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;

-	giveup_altivec(tsk);
+	newmsr = oldmsr | bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr |= MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
 }

-#else
-#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
-#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+void __msr_check_and_clear(unsigned long bits)
+{
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
+
+	newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr &= ~MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+}
+EXPORT_SYMBOL(__msr_check_and_clear);

 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_FP);
+	__giveup_fpu(tsk);
+	msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		 */
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
 			/*
 			 * This should only ever be called for current or
 			 * for a stopped child process.  Since we save away
-			 * the FP register state on context switch on SMP,
+			 * the FP register state on context switch,
 			 * there is something wrong if a stopped child appears
 			 * to still have its FP state in the CPU registers.
 			 */
 			BUG_ON(tsk != current);
-#endif
-			giveup_fpu_maybe_transactional(tsk);
+			giveup_fpu(tsk);
 		}
 		preempt_enable();
 	}
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif /* CONFIG_PPC_FPU */

 void enable_kernel_fp(void)
 {
 	WARN_ON(preemptible());

-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu_maybe_transactional(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu_maybe_transactional(last_task_used_math);
-#endif /* CONFIG_SMP */
+	msr_check_and_set(MSR_FP);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
+		check_if_tm_restore_required(current);
+		__giveup_fpu(current);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
+#endif /* CONFIG_PPC_FPU */

 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_VEC);
+	__giveup_altivec(tsk);
+	msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());

-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-		giveup_altivec_maybe_transactional(current);
-	else
-		giveup_altivec_notask();
-#else
-	giveup_altivec_maybe_transactional(last_task_used_altivec);
-#endif /* CONFIG_SMP */
+	msr_check_and_set(MSR_VEC);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
+		check_if_tm_restore_required(current);
+		__giveup_altivec(current);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_altivec);

@@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs) {
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
-#endif
-			giveup_altivec_maybe_transactional(tsk);
+			giveup_altivec(tsk);
 		}
 		preempt_enable();
 	}
@@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */

 #ifdef CONFIG_VSX
-void enable_kernel_vsx(void)
+void giveup_vsx(struct task_struct *tsk)
 {
-	WARN_ON(preemptible());
+	check_if_tm_restore_required(tsk);

-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
-		giveup_vsx(current);
-	else
-		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
-#else
-	giveup_vsx(last_task_used_vsx);
-#endif /* CONFIG_SMP */
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+	if (tsk->thread.regs->msr & MSR_FP)
+		__giveup_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		__giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(enable_kernel_vsx);
+EXPORT_SYMBOL(giveup_vsx);

-void giveup_vsx(struct task_struct *tsk)
+void enable_kernel_vsx(void)
 {
-	giveup_fpu_maybe_transactional(tsk);
-	giveup_altivec_maybe_transactional(tsk);
-	__giveup_vsx(tsk);
+	WARN_ON(preemptible());
+
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+		check_if_tm_restore_required(current);
+		if (current->thread.regs->msr & MSR_FP)
+			__giveup_fpu(current);
+		if (current->thread.regs->msr & MSR_VEC)
+			__giveup_altivec(current);
+		__giveup_vsx(current);
+	}
 }
-EXPORT_SYMBOL(giveup_vsx);
+EXPORT_SYMBOL(enable_kernel_vsx);

 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_VSX) {
-#ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
-#endif
 			giveup_vsx(tsk);
 		}
 		preempt_enable();
@@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */

 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_SPE);
+	__giveup_spe(tsk);
+	msr_check_and_clear(MSR_SPE);
+}
+EXPORT_SYMBOL(giveup_spe);

 void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());

-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
-		giveup_spe(current);
-	else
-		giveup_spe(NULL);	/* just enable SPE for kernel - force */
-#else
-	giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
+	msr_check_and_set(MSR_SPE);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
+		check_if_tm_restore_required(current);
+		__giveup_spe(current);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_spe);

@@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs) {
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
-#endif
 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 			giveup_spe(tsk);
 		}
@@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */

-#ifndef CONFIG_SMP
-/*
- * If we are doing lazy switching of CPU state (FP, altivec or SPE),
- * and the current task has some state, discard it.
- */
-void discard_lazy_cpu_state(void)
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
 {
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
+#ifdef CONFIG_PPC_FPU
+	msr_all_available |= MSR_FP;
+#endif
 #ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr_all_available |= MSR_VEC;
+#endif
 #ifdef CONFIG_VSX
-	if (last_task_used_vsx == current)
-		last_task_used_vsx = NULL;
-#endif /* CONFIG_VSX */
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr_all_available |= MSR_VSX;
+#endif
 #ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
+	if (cpu_has_feature(CPU_FTR_SPE))
+		msr_all_available |= MSR_SPE;
 #endif
-	preempt_enable();
+
+	return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+#endif
+
+	msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
+void flush_all_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		preempt_disable();
+		BUG_ON(tsk != current);
+		giveup_all(tsk);
+
+#ifdef CONFIG_SPE
+		if (tsk->thread.regs->msr & MSR_SPE)
+			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
+#endif
+
+		preempt_enable();
+	}
 }
-#endif /* CONFIG_SMP */
+EXPORT_SYMBOL(flush_all_to_thread);

 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
@@ -744,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs)
 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 	if (msr_diff & MSR_FP) {
-		fp_enable();
+		msr_check_and_set(MSR_FP);
 		load_fp_state(&current->thread.fp_state);
+		msr_check_and_clear(MSR_FP);
 		regs->msr |= current->thread.fpexc_mode;
 	}
 	if (msr_diff & MSR_VEC) {
-		vec_enable();
+		msr_check_and_set(MSR_VEC);
 		load_vr_state(&current->thread.vr_state);
+		msr_check_and_clear(MSR_VEC);
 	}
 	regs->msr |= msr_diff;
 }
@@ -760,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs)
 #define __switch_to_tm(prev)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

-struct task_struct *__switch_to(struct task_struct *prev,
-	struct task_struct *new)
+static inline void save_sprs(struct thread_struct *t)
 {
-	struct thread_struct *new_thread, *old_thread;
-	struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
-	struct ppc64_tlb_batch *batch;
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_DSCR))
+		t->dscr = mfspr(SPRN_DSCR);

-	WARN_ON(!irqs_disabled());
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		t->bescr = mfspr(SPRN_BESCR);
+		t->ebbhr = mfspr(SPRN_EBBHR);
+		t->ebbrr = mfspr(SPRN_EBBRR);

-	/* Back up the TAR and DSCR across context switches.
-	 * Note that the TAR is not available for use in the kernel.  (To
-	 * provide this, the TAR should be backed up/restored on exception
-	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
-	 * pt_regs anyway (for debug).)
-	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
-	 * these will change them.
-	 */
-	save_early_sprs(&prev->thread);
+		t->fscr = mfspr(SPRN_FSCR);

-	__switch_to_tm(prev);
+		/*
+		 * Note that the TAR is not available for use in the kernel.
+		 * (To provide this, the TAR should be backed up/restored on
+		 * exception entry/exit instead, and be in pt_regs.  FIXME,
+		 * this should be in pt_regs anyway (for debug).)
+		 */
+		t->tar = mfspr(SPRN_TAR);
+	}
+#endif
+}

-#ifdef CONFIG_SMP
-	/* avoid complexity of lazy save/restore of fpu
-	 * by just saving it every time we switch out if
-	 * this task used the fpu during the last quantum.
-	 *
-	 * If it tries to use the fpu again, it'll trap and
-	 * reload its fp regs.  So we don't have to do a restore
-	 * every switch, just a save.
-	 *  -- Cort
-	 */
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-		giveup_fpu(prev);
+static inline void restore_sprs(struct thread_struct *old_thread,
+		struct thread_struct *new_thread)
+{
 #ifdef CONFIG_ALTIVEC
-	/*
-	 * If the previous thread used altivec in the last quantum
-	 * (thus changing altivec regs) then save them.
-	 * We used to check the VRSAVE register but not all apps
-	 * set it, so we don't rely on it now (and in fact we need
-	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-	 *
-	 * On SMP we always save/restore altivec regs just to avoid the
-	 * complexity of changing processors.
-	 *  -- Cort
-	 */
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
-		giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-		/* VMX and FPU registers are already save here */
-		__giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-	/*
-	 * If the previous thread used spe in the last quantum
-	 * (thus changing spe regs) then save them.
-	 *
-	 * On SMP we always save/restore spe regs just to avoid the
-	 * complexity of changing processors.
-	 */
-	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-		giveup_spe(prev);
-#endif /* CONFIG_SPE */
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+	    old_thread->vrsave != new_thread->vrsave)
+		mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_DSCR)) {
+		u64 dscr = get_paca()->dscr_default;
+		u64 fscr = old_thread->fscr & ~FSCR_DSCR;

-#else  /* CONFIG_SMP */
-#ifdef CONFIG_ALTIVEC
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_altivec -- Cort
-	 */
-	if (new->thread.regs && last_task_used_altivec == new)
-		new->thread.regs->msr |= MSR_VEC;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	if (new->thread.regs && last_task_used_vsx == new)
-		new->thread.regs->msr |= MSR_VSX;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_spe
-	 */
-	if (new->thread.regs && last_task_used_spe == new)
-		new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
+		if (new_thread->dscr_inherit) {
+			dscr = new_thread->dscr;
+			fscr |= FSCR_DSCR;
+		}

-#endif /* CONFIG_SMP */
+		if (old_thread->dscr != dscr)
+			mtspr(SPRN_DSCR, dscr);

-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread.debug);
-#else
-/*
- * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
- * schedule DABR
- */
-#ifndef CONFIG_HAVE_HW_BREAKPOINT
-	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
-		__set_breakpoint(&new->thread.hw_brk);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+		if (old_thread->fscr != fscr)
+			mtspr(SPRN_FSCR, fscr);
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		if (old_thread->bescr != new_thread->bescr)
+			mtspr(SPRN_BESCR, new_thread->bescr);
+		if (old_thread->ebbhr != new_thread->ebbhr)
+			mtspr(SPRN_EBBHR, new_thread->ebbhr);
+		if (old_thread->ebbrr != new_thread->ebbrr)
+			mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+		if (old_thread->tar != new_thread->tar)
+			mtspr(SPRN_TAR, new_thread->tar);
+	}
 #endif
+}
+
+struct task_struct *__switch_to(struct task_struct *prev,
+	struct task_struct *new)
+{
+	struct thread_struct *new_thread, *old_thread;
+	struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct ppc64_tlb_batch *batch;
+#endif

 	new_thread = &new->thread;
 	old_thread = &current->thread;

+	WARN_ON(!irqs_disabled());
+
 #ifdef CONFIG_PPC64
 	/*
 	 * Collect processor utilization data per process
|
|
|
}
|
|
|
#endif /* CONFIG_PPC_BOOK3S_64 */
|
|
|
|
|
|
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
|
|
|
+ switch_booke_debug_regs(&new->thread.debug);
|
|
|
+#else
|
|
|
+/*
|
|
|
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
|
|
|
+ * schedule DABR
|
|
|
+ */
|
|
|
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
|
|
|
+ if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk)))
|
|
|
+ __set_breakpoint(&new->thread.hw_brk);
|
|
|
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
|
|
|
+#endif
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We need to save SPRs before treclaim/trecheckpoint as these will
|
|
|
+ * change a number of them.
|
|
|
+ */
|
|
|
+ save_sprs(&prev->thread);
|
|
|
+
|
|
|
+ __switch_to_tm(prev);
|
|
|
+
|
|
|
+ /* Save FPU, Altivec, VSX and SPE state */
|
|
|
+ giveup_all(prev);
|
|
|
+
|
|
|
/*
|
|
|
* We can't take a PMU exception inside _switch() since there is a
|
|
|
* window where the kernel stack SLB and the kernel stack are out
|
|
@@ -899,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
|
|
|
|
|
|
tm_recheckpoint_new_task(new);
|
|
|
|
|
|
+ /*
|
|
|
+ * Call restore_sprs() before calling _switch(). If we move it after
|
|
|
+ * _switch() then we miss out on calling it for new tasks. The reason
|
|
|
+ * for this is we manually create a stack frame for new tasks that
|
|
|
+ * directly returns through ret_from_fork() or
|
|
|
+ * ret_from_kernel_thread(). See copy_thread() for details.
|
|
|
+ */
|
|
|
+ restore_sprs(old_thread, new_thread);
|
|
|
+
|
|
|
last = _switch(old_thread, new_thread);
|
|
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
@@ -952,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs)
 	printk("\n");
 }

-static struct regbit {
+struct regbit {
 	unsigned long bit;
 	const char *name;
-} msr_bits[] = {
+};
+
+static struct regbit msr_bits[] = {
 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
 	{MSR_SF,	"SF"},
 	{MSR_HV,	"HV"},
@@ -985,16 +1086,49 @@ static struct regbit {
 	{0,	NULL}
 };

-static void printbits(unsigned long val, struct regbit *bits)
+static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 {
-	const char *sep = "";
+	const char *s = "";

-	printk("<");
 	for (; bits->bit; ++bits)
 		if (val & bits->bit) {
-			printk("%s%s", sep, bits->name);
-			sep = ",";
+			printk("%s%s", s, bits->name);
+			s = sep;
 		}
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static struct regbit msr_tm_bits[] = {
+	{MSR_TS_T,	"T"},
+	{MSR_TS_S,	"S"},
+	{MSR_TM,	"E"},
+	{0,		NULL}
+};
+
+static void print_tm_bits(unsigned long val)
+{
+/*
+ * This only prints something if at least one of the TM bits is set.
+ * Inside the TM[], the output means:
+ *   E: Enabled		(bit 32)
+ *   S: Suspended	(bit 33)
+ *   T: Transactional	(bit 34)
+ */
+	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
+		printk(",TM[");
+		print_bits(val, msr_tm_bits, "");
+		printk("]");
+	}
+}
+#else
+static void print_tm_bits(unsigned long val) {}
+#endif
+
+static void print_msr_bits(unsigned long val)
+{
+	printk("<");
+	print_bits(val, msr_bits, ",");
+	print_tm_bits(val);
 	printk(">");
 }

@@ -1019,7 +1153,7 @@ void show_regs(struct pt_regs * regs)
 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
 	       regs, regs->trap, print_tainted(), init_utsname()->release);
 	printk("MSR: "REG" ", regs->msr);
-	printbits(regs->msr, msr_bits);
+	print_msr_bits(regs->msr);
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
@@ -1061,13 +1195,10 @@ void show_regs(struct pt_regs * regs)

 void exit_thread(void)
 {
-	discard_lazy_cpu_state();
 }

 void flush_thread(void)
 {
-	discard_lazy_cpu_state();
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	flush_ptrace_hw_breakpoint(current);
 #else /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1086,10 +1217,7 @@ release_thread(struct task_struct *t)
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-	flush_fp_to_thread(src);
-	flush_altivec_to_thread(src);
-	flush_vsx_to_thread(src);
-	flush_spe_to_thread(src);
+	flush_all_to_thread(src);
 	/*
 	 * Flush TM state out so we can copy it. __switch_to_tm() does this
 	 * flush but it removes the checkpointed state from the current CPU and
@@ -1212,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_DSCR)) {
 		p->thread.dscr_inherit = current->thread.dscr_inherit;
-		p->thread.dscr = current->thread.dscr;
+		p->thread.dscr = mfspr(SPRN_DSCR);
 	}
 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
 		p->thread.ppr = INIT_PPR;
@@ -1305,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 		regs->msr = MSR_USER32;
 	}
 #endif
-	discard_lazy_cpu_state();
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif