@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
 
 	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+	if (current->thread.regs &&
+	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
 		check_if_tm_restore_required(current);
 		/*
 		 * If a thread has already been reclaimed then the
@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_VSX) {
+		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
 			BUG_ON(tsk != current);
 			giveup_vsx(tsk);
 		}