@@ -232,17 +232,30 @@ ENDPROC(copy_user_enhanced_fast_string)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination/source out of cache for more performance.
+ * This will force destination out of cache for more performance.
+ *
+ * Note: Cached memory copy is used when destination or size is not
+ * naturally aligned. That is:
+ *  - Require 8-byte alignment when size is 8 bytes or larger.
  */
 ENTRY(__copy_user_nocache)
 	ASM_STAC
+
+	/* If size is less than 8 bytes, go to byte copy */
 	cmpl $8,%edx
-	jb 20f		/* less then 8 bytes, go to byte copy loop */
+	jb .L_1b_cache_copy_entry
+
+	/* If destination is not 8-byte aligned, "cache" copy to align it */
 	ALIGN_DESTINATION
+
+	/* Set 4x8-byte copy count and remainder */
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */
+
+	/* Perform 4x8-byte nocache loop-copy */
+.L_4x8b_nocache_copy_loop:
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
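
The shift/mask arithmetic above feeds a three-level decomposition that
the rest of the patch consumes: 64-byte blocks for the 4x8-byte nocache
loop, then 8-byte words, then a byte tail. A minimal C sketch of the
same count math (nocache_copy_counts is a hypothetical helper name, for
illustration only):

	#include <stddef.h>

	static void nocache_copy_counts(size_t len, size_t *blocks64,
					size_t *words8, size_t *bytes)
	{
		*blocks64 = len >> 6;	/* shrl $6,%ecx: 4x8-byte iterations */
		len &= 63;		/* andl $63,%edx: remainder */
		*words8 = len >> 3;	/* shrl $3,%ecx: 8-byte iterations */
		*bytes = len & 7;	/* andl $7,%edx: byte "cache" tail */
	}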
@@ -262,60 +275,79 @@ ENTRY(__copy_user_nocache)
 	leaq 64(%rsi),%rsi
 	leaq 64(%rdi),%rdi
 	decl %ecx
-	jnz 1b
-17:	movl %edx,%ecx
+	jnz .L_4x8b_nocache_copy_loop
+
+	/* Set 8-byte copy count and remainder */
+.L_8b_nocache_copy_entry:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
-	jz 20f
-18:	movq (%rsi),%r8
-19:	movnti %r8,(%rdi)
+	jz .L_1b_cache_copy_entry	/* jump if count is 0 */
+
+	/* Perform 8-byte nocache loop-copy */
+.L_8b_nocache_copy_loop:
+20:	movq (%rsi),%r8
+21:	movnti %r8,(%rdi)
 	leaq 8(%rsi),%rsi
 	leaq 8(%rdi),%rdi
 	decl %ecx
-	jnz 18b
-20:	andl %edx,%edx
-	jz 23f
+	jnz .L_8b_nocache_copy_loop
+
+	/* If no bytes left, we're done */
+.L_1b_cache_copy_entry:
+	andl %edx,%edx
+	jz .L_finish_copy
+
+	/* Perform byte "cache" loop-copy for the remainder */
 	movl %edx,%ecx
-21:	movb (%rsi),%al
-22:	movb %al,(%rdi)
+.L_1b_cache_copy_loop:
+40:	movb (%rsi),%al
+41:	movb %al,(%rdi)
 	incq %rsi
 	incq %rdi
 	decl %ecx
-	jnz 21b
-23:	xorl %eax,%eax
+	jnz .L_1b_cache_copy_loop
+
+	/* Finished copying; fence the prior stores */
+.L_finish_copy:
+	xorl %eax,%eax
 	ASM_CLAC
 	sfence
 	ret
 
 	.section .fixup,"ax"
-30:	shll $6,%ecx
+.L_fixup_4x8b_copy:
+	shll $6,%ecx
 	addl %ecx,%edx
-	jmp 60f
-40:	lea (%rdx,%rcx,8),%rdx
-	jmp 60f
-50:	movl %ecx,%edx
-60:	sfence
+	jmp .L_fixup_handle_tail
+.L_fixup_8b_copy:
+	lea (%rdx,%rcx,8),%rdx
+	jmp .L_fixup_handle_tail
+.L_fixup_1b_copy:
+	movl %ecx,%edx
+.L_fixup_handle_tail:
+	sfence
 	jmp copy_user_handle_tail
 	.previous
 
-	_ASM_EXTABLE(1b,30b)
-	_ASM_EXTABLE(2b,30b)
-	_ASM_EXTABLE(3b,30b)
-	_ASM_EXTABLE(4b,30b)
-	_ASM_EXTABLE(5b,30b)
-	_ASM_EXTABLE(6b,30b)
-	_ASM_EXTABLE(7b,30b)
-	_ASM_EXTABLE(8b,30b)
-	_ASM_EXTABLE(9b,30b)
-	_ASM_EXTABLE(10b,30b)
-	_ASM_EXTABLE(11b,30b)
-	_ASM_EXTABLE(12b,30b)
-	_ASM_EXTABLE(13b,30b)
-	_ASM_EXTABLE(14b,30b)
-	_ASM_EXTABLE(15b,30b)
-	_ASM_EXTABLE(16b,30b)
-	_ASM_EXTABLE(18b,40b)
-	_ASM_EXTABLE(19b,40b)
-	_ASM_EXTABLE(21b,50b)
-	_ASM_EXTABLE(22b,50b)
+	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
+	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
+	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
+	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
+	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
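
For readers who want to see the store pattern outside the kernel: the
code loads with ordinary movq and stores with movnti, whose
non-temporal stores are weakly ordered and therefore need the final
sfence (the patch's "fence the prior stores" comment). A rough
userspace sketch with SSE2 intrinsics (copy_nocache is a hypothetical
name, not the kernel API; it assumes an 8-byte-aligned destination,
which the kernel code arranges via ALIGN_DESTINATION):

	#include <emmintrin.h>	/* _mm_stream_si64 (movnti), _mm_sfence */
	#include <stddef.h>
	#include <string.h>

	static void copy_nocache(void *dst, const void *src, size_t len)
	{
		long long *d = dst;
		const char *s = src;

		while (len >= 8) {		/* 8-byte movnti loop */
			long long v;

			memcpy(&v, s, 8);	/* unaligned-safe load */
			_mm_stream_si64(d, v);
			d++; s += 8; len -= 8;
		}
		if (len)			/* byte "cache" tail */
			memcpy(d, s, len);
		_mm_sfence();			/* fence the NT stores */
	}

Without the sfence, a later consumer could observe the destination
before the non-temporal stores have drained from the write-combining
buffers; that is why both fault paths in the fixup section also execute
sfence before jumping to copy_user_handle_tail.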