Browse code

powerpc: Add the ability to save Altivec without giving it up

This patch adds the ability to be able to save the VEC registers to the
thread struct without giving up (disabling the facility) next time the
process returns to userspace.

This patch builds on a previous optimisation for the FPU registers in the
thread copy path to avoid a possibly pointless reload of VEC state.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Cyril Bur authored 9 years ago
Parent
Commit
6f515d842e
3 files changed, 17 insertions(+), 22 deletions(-)
  1. 2 1
      arch/powerpc/include/asm/switch_to.h
  2. 11 1
      arch/powerpc/kernel/process.c
  3. 4 20
      arch/powerpc/kernel/vector.S

+ 2 - 1
arch/powerpc/include/asm/switch_to.h

@@ -43,12 +43,13 @@ static inline void flush_fp_to_thread(struct task_struct *t) { }
 extern void enable_kernel_altivec(void);
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
-extern void __giveup_altivec(struct task_struct *);
+extern void save_altivec(struct task_struct *);
 static inline void disable_kernel_altivec(void)
 {
 	msr_check_and_clear(MSR_VEC);
 }
 #else
+static inline void save_altivec(struct task_struct *t) { }
 static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 

+ 11 - 1
arch/powerpc/kernel/process.c

@@ -213,6 +213,16 @@ static int restore_fp(struct task_struct *tsk) { return 0; }
 #ifdef CONFIG_ALTIVEC
 #define loadvec(thr) ((thr).load_vec)
 
+static void __giveup_altivec(struct task_struct *tsk)
+{
+	save_altivec(tsk);
+	tsk->thread.regs->msr &= ~MSR_VEC;
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX))
+		tsk->thread.regs->msr &= ~MSR_VSX;
+#endif
+}
+
 void giveup_altivec(struct task_struct *tsk)
 {
 	check_if_tm_restore_required(tsk);
@@ -472,7 +482,7 @@ void save_all(struct task_struct *tsk)
 		save_fpu(tsk);
 
 	if (usermsr & MSR_VEC)
-		__giveup_altivec(tsk);
+		save_altivec(tsk);
 
 	if (usermsr & MSR_VSX)
 		__giveup_vsx(tsk);

+ 4 - 20
arch/powerpc/kernel/vector.S

@@ -106,36 +106,20 @@ _GLOBAL(load_up_altivec)
 	blr
 
 /*
- * __giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
+ * save_altivec(tsk)
+ * Save the vector registers to its thread_struct
  */
-_GLOBAL(__giveup_altivec)
+_GLOBAL(save_altivec)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
 	PPC_LCMPI	0,r7,0
 	bne	2f
 	addi	r7,r3,THREAD_VRSTATE
-2:	PPC_LCMPI	0,r5,0
-	SAVE_32VRS(0,r4,r7)
+2:	SAVE_32VRS(0,r4,r7)
 	mfvscr	v0
 	li	r4,VRSTATE_VSCR
 	stvx	v0,r4,r7
-	beq	1f
-	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	lis	r3,(MSR_VEC|MSR_VSX)@h
-FTR_SECTION_ELSE
-	lis	r3,MSR_VEC@h
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#else
-	lis	r3,MSR_VEC@h
-#endif
-	andc	r4,r4,r3		/* disable FP for previous task */
-	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
 	blr
 
 #ifdef CONFIG_VSX