memmove_64.S

/*
 * Normally compiler builtins are used, but sometimes the compiler calls out
 * of line code. Based on asm-i386/string.h.
 *
 * This assembly file was rewritten from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

#undef memmove

/*
 * Implement memmove(). This can handle overlap between src and dst.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
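
/*
 * The dispatch below is roughly equivalent to the following C sketch
 * (illustrative only; copy_forward/copy_backward are hypothetical helpers,
 * not symbols defined here):
 *
 *	void *memmove(void *dest, const void *src, size_t n)
 *	{
 *		if ((char *)src >= (char *)dest ||
 *		    (char *)src + n <= (char *)dest)
 *			copy_forward(dest, src, n);	/- no forward hazard
 *		else
 *			copy_backward(dest, src, n);	/- dest is inside src
 *		return dest;
 *	}
 */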
.weak memmove
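/*
 * memmove is weak, presumably so that an instrumented build (e.g. KASAN)
 * can interpose its own definition; __memmove below stays the strong,
 * always-available alias. (Rationale assumed, not stated in this file.)
 */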
ENTRY(memmove)
ENTRY(__memmove)
	CFI_STARTPROC

	/* Counts of 32 bytes or more are handled by the copy loops below */
	mov %rdi, %rax
	cmp $0x20, %rdx
	jb 1f
	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f
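
	/*
	 * Forward copy is safe when src >= dest (reads stay ahead of
	 * writes) and also when the regions do not overlap at all
	 * (src + count <= dest). Only src < dest < src + count has to
	 * copy backward, starting from the tail.
	 */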
.Lmemmove_begin_forward:
	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
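	/*
	 * On CPUs with X86_FEATURE_ERMS (enhanced rep movsb), alternatives
	 * patching replaces the padding above with
	 * "movq %rdx, %rcx; rep movsb; retq", so a single rep movsb does
	 * the whole forward copy and returns; everything below then only
	 * runs on non-ERMS CPUs.
	 */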
	/*
	 * The movsq instruction has a high startup latency,
	 * so we handle small sizes with general-purpose registers.
	 */
	cmp $680, %rdx
	jb 3f
	/*
	 * movsq is only a win for the aligned case.
	 */
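	/*
	 * Comparing only the low bytes of dest and src is a cheap,
	 * conservative alignment test: equal low bytes guarantee that both
	 * pointers share the same offset modulo 8, the case rep movsq
	 * handles efficiently. Unequal low bytes may still be equally
	 * aligned, but then we simply take the register loop instead.
	 */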
	cmpb %dil, %sil
	je 4f
3:
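	/*
	 * Count bookkeeping: %rdx is biased down by 0x20 here and again at
	 * the top of the loop, so CF from the in-loop subtract is set once
	 * fewer than 32 bytes remain; the addq after the loop then restores
	 * the 0..31 byte remainder for the shared tail code at 1:.
	 */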
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward in each loop.
	 */
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b
	addq $0x20, %rdx
	jmp 1f
	/*
	 * Handle data forward by movsq.
	 */
	.p2align 4
4:
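	/*
	 * count need not be a multiple of 8: rep movsq copies count/8
	 * qwords, and the final qword at src + count - 8 is loaded into
	 * %r11 up front (before movsq can clobber it through an overlap)
	 * and stored last through %r10, which still holds dest + count - 8
	 * after movsq has advanced %rdi.
	 */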
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq.
	 */
	.p2align 4
7:
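	/*
	 * std makes rep movsq walk downward from the last qword, which is
	 * what an overlapping backward copy needs. The first qword of src
	 * is saved in %r11 before it can be overwritten and is stored last
	 * through %r10 (the original dest), covering the count % 8 head
	 * bytes. cld must follow: kernel code relies on a cleared
	 * direction flag.
	 */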
	movq %rdx, %rcx
	movq (%rsi), %r11
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f

	/*
	 * Start to prepare for backward copy.
	 */
	.p2align 4
2:
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
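	/*
	 * Counts under 680 bytes, or mismatched alignment, take the 32-byte
	 * register loop below; 680 looks like an empirically chosen
	 * crossover for rep movsq, mirroring the forward path.
	 */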
6:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward in each loop.
	 */
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b
	/*
	 * Calculate copy position to head.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
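	/*
	 * The backward loop stopped with %rsi/%rdi just past the uncopied
	 * head and %rdx holding the remaining byte count, so stepping both
	 * pointers back by %rdx lets the common tail code at 1: finish the
	 * copy from the front.
	 */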
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
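	/*
	 * Overlapping-window trick: load the first 16 and the last 16
	 * bytes (the two windows may overlap in the middle), then store
	 * them. Since every load happens before any store, this is safe
	 * in either overlap direction for any count from 16 to 31.
	 */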
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Move data from 2 bytes to 3 bytes.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Move data for 1 byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	retq
	CFI_ENDPROC
ENDPROC(__memmove)
ENDPROC(memmove)