@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7)
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
 
-	std	r3,STK_PARAM(R1)(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
 
-	std	r3,STK_PARAM(R1)(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 #endif
@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7)
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
 
-15:	ld	r3,STK_PARAM(R3)(r1)
+15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blr
 
 .Lunwind_stack_nonvmx_copy:
@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7)
 #ifdef CONFIG_ALTIVEC
 .Lvmx_copy:
 	mflr	r0
-	std	r4,STK_PARAM(R4)(r1)
-	std	r5,STK_PARAM(R5)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
 	bl	enter_vmx_copy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
-	ld	r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
-	ld	r5,STACKFRAMESIZE+STK_PARAM(R5)(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 
 	/*
@@ -447,7 +447,7 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,STK_PARAM(R3)(r1)
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	b	exit_vmx_copy		/* tail call optimise */
 
 .Lvmx_unaligned_copy:
@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,STK_PARAM(R3)(r1)
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	b	exit_vmx_copy		/* tail call optimise */
 #endif /* CONFiG_ALTIVEC */
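
For context: the STK_PARAM(Rn) slots being replaced address the caller's
parameter save area, which the ELFv2 ABI allows a caller to omit, so a callee
storing through them can corrupt the caller's frame. The new offsets instead
use the protected area ("red zone") the ppc64 ELF ABIs guarantee below the
stack pointer, and once stdu establishes the frame the same slots are reached
at positive STK_REG(Rn) offsets. A minimal sketch of that pattern, assuming
the STACKFRAMESIZE and STK_REG macros from arch/powerpc/include/asm/ppc_asm.h
(the register choice here is illustrative, not taken from the patch):

	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* stash r3 in the red zone below r1 */
	stdu	r1,-STACKFRAMESIZE(r1)			/* create the frame; the slot above  */
	ld	r3,STK_REG(R31)(r1)			/* is now at a positive offset       */
	addi	r1,r1,STACKFRAMESIZE			/* tear the frame back down          */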