x86-64, copy_user: Remove zero byte check before copy user buffer.

The rep movsb instruction handles a zero-byte copy by itself. As pointed out by
Linus, there is no need to check for zero size in the kernel. Removing this
redundant check saves a few cycles in the copy user functions.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1384634221-6006-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Fenghua Yu, 11 years ago
commit f4cb1cc18f

1 changed file with 2 additions and 6 deletions:
    arch/x86/lib/copy_user_64.S

--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S

@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 4f
 	cmpl $8,%edx
 	jb 2f		/* less than 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -249,7 +247,7 @@ ENTRY(copy_user_generic_string)
 2:	movl %edx,%ecx
 3:	rep
 	movsb
-4:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
 
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 2f
 	movl %edx,%ecx
1:	rep
 	movsb
-2:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
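
For illustration, a minimal stand-alone user-space sketch (not part of this commit) of why the removed check is unnecessary: with a zero count in %rcx, rep movsb simply moves no bytes and falls through. The file name, labels, and build command below are assumptions.

# repmovsb_zero.s -- hypothetical example, not from the commit.
# Shows that `rep movsb` with %rcx == 0 copies nothing, so an explicit
# zero-size check before it is redundant.
# Assumed build: gcc -nostartfiles -static -o repmovsb_zero repmovsb_zero.s

	.text
	.globl _start
_start:
	lea	src(%rip), %rsi		# source buffer
	lea	dst(%rip), %rdi		# destination buffer
	xorl	%ecx, %ecx		# count = 0 (clears all of %rcx)
	rep
	movsb				# %rcx == 0: no bytes are moved

	xorl	%eax, %eax		# mirror the functions above: return 0
	movl	%eax, %edi		# exit status 0
	movl	$231, %eax		# __NR_exit_group on x86-64
	syscall

	.data
src:	.ascii	"hello"
dst:	.space	5

Stepping through this under a debugger shows dst left untouched after the rep movsb, which is the behavior the removed andl/jz pair was redundantly guarding against.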