
x86/fpu: Use 'struct fpu' in fpstate_alloc_init()

Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 10 years ago
parent
commit db2b1d3ad1
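
At the call sites, the change means callers no longer hand the helper a task_struct for it to dereference; they resolve the 'struct fpu' themselves and pass it directly. A minimal before/after sketch, mirroring the kvm_arch_vcpu_ioctl_run() hunk below (caller context abbreviated, not part of the commit):

	struct fpu *fpu = &current->thread.fpu;

	/* Before: the helper re-derived the FPU state from the task: */
	if (!fpu->fpstate_active && fpstate_alloc_init(current))
		return -ENOMEM;

	/* After: the caller passes the 'struct fpu' it already resolved: */
	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
		return -ENOMEM;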

+ 1 - 1
arch/x86/include/asm/i387.h

@@ -18,7 +18,7 @@
 struct pt_regs;
 struct user_i387_struct;
 
-extern int fpstate_alloc_init(struct task_struct *curr);
+extern int fpstate_alloc_init(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__flush_thread(struct task_struct *tsk);
 

+ 6 - 7
arch/x86/kernel/fpu/core.c

@@ -263,12 +263,11 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
  *
  * Can fail.
  */
-int fpstate_alloc_init(struct task_struct *curr)
+int fpstate_alloc_init(struct fpu *fpu)
 {
-	struct fpu *fpu = &curr->thread.fpu;
 	int ret;
 
-	if (WARN_ON_ONCE(curr != current))
+	if (WARN_ON_ONCE(fpu != &current->thread.fpu))
 		return -EINVAL;
 	if (WARN_ON_ONCE(fpu->fpstate_active))
 		return -EINVAL;
@@ -276,11 +275,11 @@ int fpstate_alloc_init(struct task_struct *curr)
 	/*
 	 * Memory allocation at the first usage of the FPU and other state.
 	 */
-	ret = fpstate_alloc(&curr->thread.fpu);
+	ret = fpstate_alloc(fpu);
 	if (ret)
 		return ret;
 
-	fpstate_init(&curr->thread.fpu);
+	fpstate_init(fpu);
 
 	/* Safe to do for the current task: */
 	fpu->fpstate_active = 1;
@@ -360,7 +359,7 @@ void fpu__restore(void)
 		/*
 		 * does a slab alloc which can sleep
 		 */
-		if (fpstate_alloc_init(tsk)) {
+		if (fpstate_alloc_init(fpu)) {
 			/*
 			 * ran out of memory!
 			 */
@@ -396,7 +395,7 @@ void fpu__flush_thread(struct task_struct *tsk)
 	} else {
 		if (!fpu->fpstate_active) {
 			/* kthread execs. TODO: cleanup this horror. */
-		if (WARN_ON(fpstate_alloc_init(tsk)))
+		if (WARN_ON(fpstate_alloc_init(fpu)))
 				force_sig(SIGKILL, tsk);
 			user_fpu_begin();
 		}

+ 1 - 1
arch/x86/kernel/fpu/xsave.c

@@ -350,7 +350,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	if (!fpu->fpstate_active && fpstate_alloc_init(tsk))
+	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
 		return -1;
 
 	if (!static_cpu_has(X86_FEATURE_FPU))

+ 1 - 1
arch/x86/kvm/x86.c

@@ -6601,7 +6601,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
 
-	if (!fpu->fpstate_active && fpstate_alloc_init(current))
+	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
 		return -ENOMEM;
 
 	if (vcpu->sigset_active)

+ 1 - 1
arch/x86/math-emu/fpu_entry.c

@@ -150,7 +150,7 @@ void math_emulate(struct math_emu_info *info)
 	struct fpu *fpu = &current->thread.fpu;
 
 	if (!fpu->fpstate_active) {
-		if (fpstate_alloc_init(current)) {
+		if (fpstate_alloc_init(fpu)) {
 			do_group_exit(SIGKILL);
 			return;
 		}