memcpy_64.S

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>

.pushsection .noinstr.text, "ax"
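
/*
 * Note: placed in .noinstr.text so the routine stays callable from
 * non-instrumentable (noinstr) code paths.
 */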

/*
 * We build a jump to memcpy_orig by default which gets NOPped out on
 * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
 * have the enhanced REP MOVSB/STOSB feature (ERMS) change those NOPs
 * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
 */

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 *  rax original destination
 */
SYM_TYPED_FUNC_START(__memcpy)
        ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
                      "jmp memcpy_erms", X86_FEATURE_ERMS
        movq %rdi, %rax
        movq %rdx, %rcx
        shrq $3, %rcx
        andl $7, %edx
        rep movsq
        movl %edx, %ecx
        rep movsb
        RET
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
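
/*
 * memcpy is a weak alias so that an instrumented implementation (for
 * example, KASAN's) can override it.
 */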
SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)

/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
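/*
 * On ERMS parts, REP MOVSB is handled efficiently across a wide range
 * of sizes, so a single string move replaces the qword/byte split
 * used above.
 */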
SYM_FUNC_START_LOCAL(memcpy_erms)
        movq %rdi, %rax
        movq %rdx, %rcx
        rep movsb
        RET
SYM_FUNC_END(memcpy_erms)
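
/*
 * Fallback for CPUs without REP_GOOD: an unrolled 32-byte copy loop,
 * with a forward/backward choice to avoid memory false dependences.
 */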
SYM_FUNC_START_LOCAL(memcpy_orig)
        movq %rdi, %rax

        cmpq $0x20, %rdx
        jb .Lhandle_tail

        /*
         * We check whether a memory false dependence could occur,
         * then jump to the corresponding copy mode.
         */
        cmp %dil, %sil
        jl .Lcopy_backward
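        /*
         * Only the low address bytes are compared above: a cheap
         * heuristic for the aliasing case. When the source starts
         * below the destination, copying from the tail backward keeps
         * the stores from running ahead of loads of overlapping bytes.
         */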
        subq $0x20, %rdx
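        /*
         * %rdx is pre-biased by -0x20 so that the subq at the top of
         * the loop both counts down and, via the carry flag, tests
         * whether a full 32-byte block remains; the movq/leaq in
         * between do not touch the flags the jae consumes.
         */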
.Lcopy_forward_loop:
        subq $0x20, %rdx

        /*
         * Move in blocks of 4x8 bytes:
         */
        movq 0*8(%rsi), %r8
        movq 1*8(%rsi), %r9
        movq 2*8(%rsi), %r10
        movq 3*8(%rsi), %r11
        leaq 4*8(%rsi), %rsi

        movq %r8, 0*8(%rdi)
        movq %r9, 1*8(%rdi)
        movq %r10, 2*8(%rdi)
        movq %r11, 3*8(%rdi)
        leaq 4*8(%rdi), %rdi
        jae .Lcopy_forward_loop
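        /* Undo the bias: %edx becomes the 0..31-byte remainder. */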
        addl $0x20, %edx
        jmp .Lhandle_tail

.Lcopy_backward:
        /*
         * Calculate copy position to tail.
         */
        addq %rdx, %rsi
        addq %rdx, %rdi
        subq $0x20, %rdx
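        /* Same -0x20 bias trick as in the forward loop, mirrored. */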
        /*
         * At most 3 ALU operations in one cycle,
         * so append NOPs in the same 16-byte chunk.
         */
        .p2align 4
.Lcopy_backward_loop:
        subq $0x20, %rdx
        movq -1*8(%rsi), %r8
        movq -2*8(%rsi), %r9
        movq -3*8(%rsi), %r10
        movq -4*8(%rsi), %r11
        leaq -4*8(%rsi), %rsi
        movq %r8, -1*8(%rdi)
        movq %r9, -2*8(%rdi)
        movq %r10, -3*8(%rdi)
        movq %r11, -4*8(%rdi)
        leaq -4*8(%rdi), %rdi
        jae .Lcopy_backward_loop
        /*
         * Restore the remainder count and step %rsi/%rdi back to the
         * head so the tail code can copy forward as usual.
         */
        addl $0x20, %edx
        subq %rdx, %rsi
        subq %rdx, %rdi

.Lhandle_tail:
        cmpl $16, %edx
        jb .Lless_16bytes

        /*
         * Move data from 16 bytes to 31 bytes: copy the first and the
         * last 16 bytes; for counts below 32 the two windows overlap
         * instead of leaving a gap.
         */
        movq 0*8(%rsi), %r8
        movq 1*8(%rsi), %r9
        movq -2*8(%rsi, %rdx), %r10
        movq -1*8(%rsi, %rdx), %r11
        movq %r8, 0*8(%rdi)
        movq %r9, 1*8(%rdi)
        movq %r10, -2*8(%rdi, %rdx)
        movq %r11, -1*8(%rdi, %rdx)
        RET
        .p2align 4
.Lless_16bytes:
        cmpl $8, %edx
        jb .Lless_8bytes
        /*
         * Move data from 8 bytes to 15 bytes: overlapping first and
         * last qword.
         */
        movq 0*8(%rsi), %r8
        movq -1*8(%rsi, %rdx), %r9
        movq %r8, 0*8(%rdi)
        movq %r9, -1*8(%rdi, %rdx)
        RET
        .p2align 4
.Lless_8bytes:
        cmpl $4, %edx
        jb .Lless_3bytes
        /*
         * Move data from 4 bytes to 7 bytes: overlapping first and
         * last dword.
         */
        movl (%rsi), %ecx
        movl -4(%rsi, %rdx), %r8d
        movl %ecx, (%rdi)
        movl %r8d, -4(%rdi, %rdx)
        RET
        .p2align 4
.Lless_3bytes:
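        /*
         * The subl sets the flags both branches below rely on: CF
         * means the count was 0, ZF means it was exactly 1. The
         * movzbl in between does not modify the flags.
         */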
        subl $1, %edx
        jb .Lend
        /*
         * Move data from 1 byte to 3 bytes.
         */
        movzbl (%rsi), %ecx
        jz .Lstore_1byte
        movzbq 1(%rsi), %r8
        movzbq (%rsi, %rdx), %r9
        movb %r8b, 1(%rdi)
        movb %r9b, (%rdi, %rdx)

.Lstore_1byte:
        movb %cl, (%rdi)
.Lend:
        RET
SYM_FUNC_END(memcpy_orig)

.popsection