memcpy_64.S

/*
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
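
/*
 * Added note (not in the upstream file): per the standard PPC64 calling
 * convention implied by the code below, r3 = destination, r4 = source,
 * r5 = length in bytes.  The original destination pointer is stashed at
 * 48(r1) on entry and reloaded into r3 before each blr, so memcpy
 * returns its dest argument.
 */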
	.align	7
_GLOBAL(memcpy)
BEGIN_FTR_SECTION
	std	r3,48(r1)	/* save destination pointer for return value */
FTR_SECTION_ELSE
#ifndef SELFTEST
	b	memcpy_power7
#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
	PPC_MTOCRF(0x01,r5)
	cmpldi	cr1,r5,16
	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
	andi.	r6,r6,7
	dcbt	0,r4
	blt	cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
   CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
   cleared.
   At the time of writing the only CPU that has this combination of bits
   set is Power6. */
BEGIN_FTR_SECTION
	nop
FTR_SECTION_ELSE
	bne	.Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
		    CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
	addi	r3,r3,-16
BEGIN_FTR_SECTION
	andi.	r0,r4,7
	bne	.Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
	srdi	r7,r5,4
	ld	r9,0(r4)
	addi	r4,r4,-8
	mtctr	r7
	andi.	r5,r5,7
	bf	cr7*4+0,2f
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,3f
1:	ld	r9,8(r4)
	std	r8,8(r3)
2:	ldu	r8,16(r4)
	stdu	r9,16(r3)
	bdnz	1b
3:	std	r8,8(r3)
	beq	3f
	addi	r3,r3,16
.Ldo_tail:
	bf	cr7*4+1,1f
	lwz	r9,8(r4)
	addi	r4,r4,4
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	lhz	r9,8(r4)
	addi	r4,r4,2
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	lbz	r9,8(r4)
	stb	r9,0(r3)
3:	ld	r3,48(r1)	/* return dest pointer */
	blr
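
/*
 * Added note (not in the upstream file): source is not doubleword
 * aligned but the destination is.  Aligned doublewords are read from
 * the rounded-down source address and each destination doubleword is
 * merged from two of them: r10 holds the left-shift count (8 * source
 * misalignment) and r11 = 64 - r10 holds the matching right-shift
 * count.  The main loop moves 16 bytes per iteration.
 */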
.Lsrc_unaligned:
	srdi	r6,r5,3
	addi	r5,r5,-16
	subf	r4,r0,r4
	srdi	r7,r5,4
	sldi	r10,r0,3
	cmpdi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64
	add	r5,r5,r0
	bt	cr7*4+0,0f

	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
	ld	r0,8(r4)
	sld	r6,r9,r10
	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,4f
	ld	r0,8(r4)
	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
	b	2f

0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,3f

	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:	or	r7,r7,r6
	ld	r0,8(r4)
	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b

3:	std	r12,8(r3)
	or	r7,r7,r6
4:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
	std	r12,24(r3)
	beq	4f
	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10
	ble	cr1,6f
	ld	r0,8(r4)
	srd	r7,r0,r11
	or	r9,r7,r9
6:
	bf	cr7*4+1,1f
	rotldi	r9,r9,32
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	ld	r3,48(r1)	/* return dest pointer */
	blr
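
/*
 * Added note (not in the upstream file): destination is not doubleword
 * aligned.  Copy the 1, 2 and/or 4 leading bytes selected by cr7 (the
 * low bits of r6, the distance to the next 8-byte boundary), advance
 * both pointers by r6, then rejoin the aligned path at .Ldst_aligned.
 */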
.Ldst_unaligned:
	PPC_MTOCRF(0x01,r6)	# put #bytes to 8B bdry into cr7
	subf	r5,r6,r5
	li	r7,0
	cmpldi	cr1,r5,16
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
	lhzx	r0,r7,r4
	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
	lwzx	r0,r7,r4
	stwx	r0,r7,r3
3:	PPC_MTOCRF(0x01,r5)
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned
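
/*
 * Added note (not in the upstream file): copies of fewer than 16 bytes
 * land here.  Move 8, 4, 2 and 1 byte pieces as selected by cr7, which
 * holds the low four bits of the requested length.
 */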
.Lshort_copy:
	bf	cr7*4+0,1f
	lwz	r0,0(r4)
	lwz	r9,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)
	stb	r0,0(r3)
4:	ld	r3,48(r1)	/* return dest pointer */
	blr