@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
 	movq %r12, 3*8(%rsp)
 	movq %r14, 4*8(%rsp)
 	movq %r13, 5*8(%rsp)
-	movq %rbp, 6*8(%rsp)
+	movq %r15, 6*8(%rsp)
 
 	movq %r8, (%rsp)
 	movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
 	/* main loop. clear in 64 byte blocks */
 	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
 	/* r11: temp3, rdx: temp4, r12 loopcnt */
-	/* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+	/* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
 	.p2align 4
 .Lloop:
 	source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
 	source
 	movq 32(%rdi), %r10
 	source
-	movq 40(%rdi), %rbp
+	movq 40(%rdi), %r15
 	source
 	movq 48(%rdi), %r14
 	source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
 	adcq %r11, %rax
 	adcq %rdx, %rax
 	adcq %r10, %rax
-	adcq %rbp, %rax
+	adcq %r15, %rax
 	adcq %r14, %rax
 	adcq %r13, %rax
 
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
 	dest
 	movq %r10, 32(%rsi)
 	dest
-	movq %rbp, 40(%rsi)
+	movq %r15, 40(%rsi)
 	dest
 	movq %r14, 48(%rsi)
 	dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
 	movq 3*8(%rsp), %r12
 	movq 4*8(%rsp), %r14
 	movq 5*8(%rsp), %r13
-	movq 6*8(%rsp), %rbp
+	movq 6*8(%rsp), %r15
 	addq $7*8, %rsp
 	ret
 