copy_mc_64.S

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */

#include <linux/linkage.h>
#include <asm/asm.h>

#ifndef CONFIG_UML

#ifdef CONFIG_X86_MCE
/*
 * copy_mc_fragile - copy memory with indication if an exception / fault happened
 *
 * The 'fragile' version is opted into by platform quirks and takes
 * pains to avoid unrecoverable corner cases like 'fast-string'
 * instruction sequences, and consuming poison across a cacheline
 * boundary. The non-fragile version is equivalent to memcpy()
 * regardless of CPU machine-check-recovery capability.
 */
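/*
 * Illustrative C-level dispatch (a sketch, not part of this file):
 * the caller in arch/x86/lib/copy_mc.c selects an implementation
 * roughly like below; the exact predicate names are assumptions here.
 *
 *	unsigned long copy_mc_to_kernel(void *dst, const void *src,
 *					unsigned len)
 *	{
 *		if (copy_mc_fragile_enabled)	// platform quirk opted in
 *			return copy_mc_fragile(dst, src, len);
 *		if (static_cpu_has(X86_FEATURE_ERMS))	// fast strings OK
 *			return copy_mc_enhanced_fast_string(dst, src, len);
 *		memcpy(dst, src, len);	// no recovery; plain copy
 *		return 0;
 *	}
 */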
SYM_FUNC_START(copy_mc_fragile)
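	/* Arguments (System V ABI): %rdi = dst, %rsi = src, %edx = len; returns bytes not copied in %rax */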
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words

	/* Check for bad alignment of source */
	testl $7, %esi
	/* Already aligned */
	jz .L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl %esi, %ecx
	andl $7, %ecx
	subl $8, %ecx
	negl %ecx
	subl %ecx, %edx
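	/* %ecx = 8 - (src & 7) leading bytes; %edx reduced accordingly */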
.L_read_leading_bytes:
	movb (%rsi), %al
.L_write_leading_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_leading_bytes

.L_8byte_aligned:
	movl %edx, %ecx
	andl $7, %edx
	shrl $3, %ecx
	jz .L_no_whole_words
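
	/* Copy whole 8-byte words; %edx now holds the trailing byte count */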
.L_read_words:
	movq (%rsi), %r8
.L_write_words:
	movq %r8, (%rdi)
	addq $8, %rsi
	addq $8, %rdi
	decl %ecx
	jnz .L_read_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl %edx, %edx
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
	movl %edx, %ecx
.L_read_trailing_bytes:
	movb (%rsi), %al
.L_write_trailing_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_trailing_bytes

	/* Copy successful. Return zero */
.L_done_memcpy_trap:
	xorl %eax, %eax
.L_done:
	RET

	/*
	 * Return number of bytes not copied for any failure. Note that
	 * there is no "tail" handling since the source buffer is 8-byte
	 * aligned and poison is cacheline aligned.
	 */
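	/*
	 * At each error exit %ecx holds the remaining loop count: the
	 * word path converts words to bytes (shll $3) and, like the
	 * leading-byte path, adds the outstanding tail count in %edx.
	 */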
.E_read_words:
	shll $3, %ecx
.E_leading_bytes:
	addl %edx, %ecx
.E_trailing_bytes:
	mov %ecx, %eax
	jmp .L_done

	/*
	 * For write fault handling, given the destination is unaligned,
	 * we handle faults on multi-byte writes with a byte-by-byte
	 * copy up to the write-protected page.
	 */
.E_write_words:
	shll $3, %ecx
	addl %edx, %ecx
	movl %ecx, %edx
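	/*
	 * copy_mc_fragile_handle_tail() (C, in copy_mc.c) retries the
	 * remainder byte-by-byte; %rdi/%rsi still point at the faulting
	 * position and %edx now carries the remaining length.
	 */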
	jmp copy_mc_fragile_handle_tail
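
	/* Read faults are machine-check safe; write faults use the default fixup */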
	_ASM_EXTABLE_TYPE(.L_read_leading_bytes, .E_leading_bytes, EX_TYPE_DEFAULT_MCE_SAFE)
	_ASM_EXTABLE_TYPE(.L_read_words, .E_read_words, EX_TYPE_DEFAULT_MCE_SAFE)
	_ASM_EXTABLE_TYPE(.L_read_trailing_bytes, .E_trailing_bytes, EX_TYPE_DEFAULT_MCE_SAFE)

	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE(.L_write_words, .E_write_words)
	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
SYM_FUNC_END(copy_mc_fragile)
#endif /* CONFIG_X86_MCE */

/*
 * copy_mc_enhanced_fast_string - memory copy with exception handling
 *
 * Fast string copy + fault / exception handling. If the CPU does
 * support machine check exception recovery, but does not support
 * recovering from fast-string exceptions, then this CPU needs to be
 * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
 * machine check recovery support, this version should be no slower than
 * standard memcpy.
 */
SYM_FUNC_START(copy_mc_enhanced_fast_string)
	movq %rdi, %rax
	movq %rdx, %rcx
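	/* %rdi = dst, %rsi = src; 'rep movsb' consumes the count in %rcx */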
.L_copy:
	rep movsb
	/* Copy successful. Return zero */
	xorl %eax, %eax
	RET

.E_copy:
	/*
	 * On fault %rcx is updated such that the copy instruction could
	 * optionally be restarted at the fault position, i.e. it
	 * contains 'bytes remaining'. A non-zero return indicates an
	 * error to copy_mc_generic() users, or a short transfer to
	 * user-copy routines.
	 */
	movq %rcx, %rax
	RET
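
	/*
	 * Illustrative caller-side check (a sketch; the variable names
	 * are this comment's own): the return value is the byte count
	 * NOT copied, so zero means success.
	 *
	 *	unsigned long rem;
	 *
	 *	rem = copy_mc_enhanced_fast_string(dst, src, len);
	 *	if (rem)
	 *		pr_warn("short copy: %lu of %u bytes left\n",
	 *			rem, len);
	 */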

	_ASM_EXTABLE_TYPE(.L_copy, .E_copy, EX_TYPE_DEFAULT_MCE_SAFE)
SYM_FUNC_END(copy_mc_enhanced_fast_string)
#endif /* !CONFIG_UML */