copy_page_64.S

/* SPDX-License-Identifier: GPL-2.0 */
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>
/*
 * Some CPUs run faster using the string copy instructions (sane microcode).
 * It is also a lot simpler. Use this when possible. But don't use the string
 * copy unless the CPU indicates X86_FEATURE_REP_GOOD. The prefetch distance
 * could be varied based on SMP/UP.
 */
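
/*
 * void copy_page(void *to, void *from) copies one 4096-byte page.
 * Per the x86-64 calling convention, %rdi = to (destination) and
 * %rsi = from (source) on entry.
 */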
	ALIGN
SYM_FUNC_START(copy_page)
	ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
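	/*
	 * The ALTERNATIVE above is patched at boot: on CPUs with
	 * X86_FEATURE_REP_GOOD the jmp is replaced by nops and we fall
	 * through to the string copy. 4096/8 = 512 quadwords are moved
	 * by REP MOVSQ, which advances %rsi and %rdi as it goes.
	 */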
	movl	$4096/8, %ecx
	rep	movsq
	RET
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
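
/*
 * Fallback for CPUs without X86_FEATURE_REP_GOOD: copy the page 64 bytes
 * (eight quadwords) at a time through general-purpose registers.
 */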
SYM_FUNC_START_LOCAL(copy_page_regs)
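	/* %rbx and %r12 are callee-saved; preserve them on the stack. */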
	subq	$2*8, %rsp
	movq	%rbx, (%rsp)
	movq	%r12, 1*8(%rsp)
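
	/*
	 * 4096/64 = 64 iterations in total; the last five are peeled
	 * into .Loop2 below, which issues no prefetch, so prefetcht0
	 * never reaches past the end of the source page.
	 */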
	movl	$(4096/64)-5, %ecx
	.p2align 4
.Loop64:
	dec	%rcx
	movq	0x8*0(%rsi), %rax
	movq	0x8*1(%rsi), %rbx
	movq	0x8*2(%rsi), %rdx
	movq	0x8*3(%rsi), %r8
	movq	0x8*4(%rsi), %r9
	movq	0x8*5(%rsi), %r10
	movq	0x8*6(%rsi), %r11
	movq	0x8*7(%rsi), %r12
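	/* Pull the cache line five iterations ahead of the reads. */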
	prefetcht0 5*64(%rsi)
	movq	%rax, 0x8*0(%rdi)
	movq	%rbx, 0x8*1(%rdi)
	movq	%rdx, 0x8*2(%rdi)
	movq	%r8,  0x8*3(%rdi)
	movq	%r9,  0x8*4(%rdi)
	movq	%r10, 0x8*5(%rdi)
	movq	%r11, 0x8*6(%rdi)
	movq	%r12, 0x8*7(%rdi)
	leaq	64(%rsi), %rsi
	leaq	64(%rdi), %rdi
	jnz	.Loop64
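
	/* Tail: the last five 64-byte chunks, copied without prefetch. */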
	movl	$5, %ecx
	.p2align 4
.Loop2:
	decl	%ecx

	movq	0x8*0(%rsi), %rax
	movq	0x8*1(%rsi), %rbx
	movq	0x8*2(%rsi), %rdx
	movq	0x8*3(%rsi), %r8
	movq	0x8*4(%rsi), %r9
	movq	0x8*5(%rsi), %r10
	movq	0x8*6(%rsi), %r11
	movq	0x8*7(%rsi), %r12

	movq	%rax, 0x8*0(%rdi)
	movq	%rbx, 0x8*1(%rdi)
	movq	%rdx, 0x8*2(%rdi)
	movq	%r8,  0x8*3(%rdi)
	movq	%r9,  0x8*4(%rdi)
	movq	%r10, 0x8*5(%rdi)
	movq	%r11, 0x8*6(%rdi)
	movq	%r12, 0x8*7(%rdi)

	leaq	64(%rdi), %rdi
	leaq	64(%rsi), %rsi
	jnz	.Loop2
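
	/* Restore callee-saved registers and release the stack frame. */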
	movq	(%rsp), %rbx
	movq	1*8(%rsp), %r12
	addq	$2*8, %rsp
	RET
SYM_FUNC_END(copy_page_regs)