copy_user_64.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/trapnr.h>

.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:

	_ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align)
	_ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align)
.endm
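
/*
 * For reference, the ALIGN_DESTINATION math corresponds to this C sketch
 * (a hypothetical helper written for illustration; the names are made up
 * and it is not the kernel's implementation). It byte-copies just enough
 * to bring dst up to an 8-byte boundary and deducts those bytes from the
 * remaining count, exactly like the 100:/101: loop above:
 *
 *	static void align_destination(char **dst, const char **src, size_t *count)
 *	{
 *		size_t head = (unsigned long)*dst & 7;	// misalignment in bytes
 *
 *		if (!head)
 *			return;			// already aligned
 *		head = 8 - head;		// subl $8 + negl: bytes to copy
 *		*count -= head;			// caller guarantees *count >= 8
 *		while (head--)
 *			*(*dst)++ = *(*src)++;
 *	}
 */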

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_generic_unrolled)
	ASM_STAC
	cmpl $8,%edx
	jb .Lcopy_user_short_string_bytes
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz copy_user_short_string
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
	jmp copy_user_short_string

30:	shll $6,%ecx
	addl %ecx,%edx
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, 30b)
	_ASM_EXTABLE_CPY(2b, 30b)
	_ASM_EXTABLE_CPY(3b, 30b)
	_ASM_EXTABLE_CPY(4b, 30b)
	_ASM_EXTABLE_CPY(5b, 30b)
	_ASM_EXTABLE_CPY(6b, 30b)
	_ASM_EXTABLE_CPY(7b, 30b)
	_ASM_EXTABLE_CPY(8b, 30b)
	_ASM_EXTABLE_CPY(9b, 30b)
	_ASM_EXTABLE_CPY(10b, 30b)
	_ASM_EXTABLE_CPY(11b, 30b)
	_ASM_EXTABLE_CPY(12b, 30b)
	_ASM_EXTABLE_CPY(13b, 30b)
	_ASM_EXTABLE_CPY(14b, 30b)
	_ASM_EXTABLE_CPY(15b, 30b)
	_ASM_EXTABLE_CPY(16b, 30b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
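
/*
 * Shape of the unrolled loop as a C sketch (hypothetical, for
 * illustration only): the count is split into 64-byte blocks for the
 * eight-movq loop, and the remainder (count & 63) is deferred to
 * copy_user_short_string. On a fault, the fixup at 30: rebuilds the
 * uncopied byte count as blocks_left * 64 + remainder.
 *
 *	static size_t unrolled_copy(unsigned long *dst, const unsigned long *src,
 *				    size_t count)
 *	{
 *		size_t blocks = count >> 6;	// shrl $6: 64-byte blocks
 *		size_t tail = count & 63;	// andl $63: remainder
 *
 *		while (blocks--) {
 *			for (int i = 0; i < 8; i++)	// labels 1..16 above
 *				dst[i] = src[i];
 *			dst += 8;
 *			src += 8;
 *		}
 *		return tail;	// still owed to copy_user_short_string
 *	}
 */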

/*
 * Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_generic_string)
	ASM_STAC
	cmpl $8,%edx
	jb 2f				/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep movsq
2:	movl %edx,%ecx
3:	rep movsb
	xorl %eax,%eax
	ASM_CLAC
	RET

11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, 11b)
	_ASM_EXTABLE_CPY(3b, 12b)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
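
/*
 * The qword/byte split above in C terms (hypothetical sketch, not the
 * implementation): the bulk moves as count / 8 qwords via rep movsq and
 * the remaining count % 8 bytes via rep movsb. The fixup at 11: rebuilds
 * the uncopied total from the qwords left in rcx, rdx + rcx * 8, which
 * is exactly what the leal computes.
 *
 *	static size_t string_copy(char *dst, const char *src, size_t count)
 *	{
 *		size_t qwords = count >> 3;	// rep movsq iterations
 *		size_t bytes = count & 7;	// rep movsb iterations
 *
 *		while (qwords--) {		// 1: rep movsq
 *			*(unsigned long *)dst = *(const unsigned long *)src;
 *			dst += 8;
 *			src += 8;
 *		}
 *		while (bytes--)			// 3: rep movsb
 *			*dst++ = *src++;
 *		return 0;
 *	}
 */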

/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when the feature
 * is enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_enhanced_fast_string)
	ASM_STAC
	/* CPUs without FSRM should avoid rep movsb for short copies */
	ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
	movl %edx,%ecx
1:	rep movsb
	xorl %eax,%eax
	ASM_CLAC
	RET

12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
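
/*
 * What the ALTERNATIVE patches in, as a C sketch (the cpu_has() helper
 * and function signatures are hypothetical, for illustration only):
 * without FSRM, copies under 64 bytes detour to the short-string path;
 * with FSRM, rep movsb is fast even for short copies, so the length
 * check is patched out entirely.
 *
 *	size_t erms_copy(char *dst, const char *src, size_t count)
 *	{
 *		if (!cpu_has(X86_FEATURE_FSRM) && count < 64)
 *			return short_string_copy(dst, src, count);
 *		while (count--)		// 1: rep movsb
 *			*dst++ = *src++;
 *		return 0;
 *	}
 */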

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 * Don't try to copy the tail if a machine check happened.
 *
 * Input:
 * eax trap number written by ex_handler_copy()
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
	cmp $X86_TRAP_MC,%eax
	je 3f

	movl %edx,%ecx
1:	rep movsb
2:	mov %ecx,%eax
	ASM_CLAC
	RET

3:
	movl %edx,%eax
	ASM_CLAC
	RET

	_ASM_EXTABLE_CPY(1b, 2b)

.Lcopy_user_handle_align:
	addl %ecx,%edx		/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail
SYM_CODE_END(.Lcopy_user_handle_tail)
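
/*
 * Tail handling in C terms (hypothetical sketch; plain C cannot express
 * the "stop at the faulting byte" behavior, see the comments): on a
 * machine check nothing more is touched and the whole remaining count is
 * reported; otherwise the copy is retried bytewise, and whatever rep
 * movsb leaves in rcx after a second fault becomes the return value.
 *
 *	static size_t handle_tail(char *dst, const char *src, size_t count,
 *				  int trapnr)
 *	{
 *		if (trapnr == X86_TRAP_MC)
 *			return count;	// don't touch poisoned memory
 *		while (count) {		// 1: rep movsb; a fault here would
 *			*dst++ = *src++;	// exit with count still set
 *			count--;
 *		}
 *		return count;		// 0 when everything was copied
 *	}
 */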

/*
 * Finish memcpy of less than 64 bytes. EFLAGS.AC should already be set
 * (the caller has done ASM_STAC).
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count (< 64)
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_CODE_START_LOCAL(copy_user_short_string)
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .Lcopy_user_short_string_bytes
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
.Lcopy_user_short_string_bytes:
	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	RET

40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx		/* ecx is zerorest also */
60:	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(18b, 40b)
	_ASM_EXTABLE_CPY(19b, 40b)
	_ASM_EXTABLE_CPY(21b, 50b)
	_ASM_EXTABLE_CPY(22b, 50b)
SYM_CODE_END(copy_user_short_string)
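
/*
 * copy_user_short_string in C terms (hypothetical sketch, illustration
 * only): at most seven qword moves followed by at most seven byte moves.
 * The fixups mirror the loops: 40: recomputes qwords_left * 8 + byte
 * remainder, while 50: just takes the bytes left in rcx.
 *
 *	static size_t short_string_copy(char *dst, const char *src, size_t count)
 *	{
 *		size_t qwords = count >> 3;	// 0..7 since count < 64
 *		size_t bytes = count & 7;
 *
 *		while (qwords--) {		// 18:/19: qword loop
 *			*(unsigned long *)dst = *(const unsigned long *)src;
 *			dst += 8;
 *			src += 8;
 *		}
 *		while (bytes--)			// 21:/22: byte loop
 *			*dst++ = *src++;
 *		return 0;
 *	}
 */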

/*
 * copy_user_nocache - Uncached memory copy with exception handling.
 * This will force the destination out of the cache for more performance.
 *
 * Note: a cached memory copy is used when the destination or size is not
 * naturally aligned. That is:
 *  - 8-byte alignment is required when the size is 8 bytes or larger.
 *  - 4-byte alignment is required when the size is 4 bytes.
 */
SYM_FUNC_START(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to 4-byte copy */
	cmpl $8,%edx
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop

	/* If no byte left, we're done */
.L_4b_nocache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* If destination is not 4-byte aligned, go to byte copy: */
	movl %edi,%ecx
	andl $3,%ecx
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	movl %edx,%ecx
	andl $3,%edx
	shrl $2,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
30:	movl (%rsi),%r8d
31:	movnti %r8d,(%rdi)
	leaq 4(%rsi),%rsi
	leaq 4(%rdi),%rdi

	/* If no bytes left, we're done: */
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	RET

.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy)
	_ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy)
	_ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy)
	_ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
	_ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
	_ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
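
/*
 * Overall flow of __copy_user_nocache as a C sketch (hypothetical; the
 * movnti_*() and sfence() helpers stand in for instructions with no plain
 * C equivalent, and all names are made up for illustration): successively
 * smaller non-temporal copies, falling back to cached byte copies where
 * the destination alignment no longer allows movnti, with one sfence
 * ordering the weakly-ordered stores before returning.
 *
 *	static size_t nocache_copy(char *dst, const char *src, size_t count)
 *	{
 *		if (count >= 8) {
 *			// ALIGN_DESTINATION: cached byte copy to align dst to 8
 *			for (size_t n = count >> 6; n; n--, dst += 64, src += 64)
 *				movnti_4x8b(dst, src);	// .L_4x8b_nocache_copy_loop
 *			count &= 63;
 *			for (size_t n = count >> 3; n; n--, dst += 8, src += 8)
 *				movnti_8b(dst, src);	// .L_8b_nocache_copy_loop
 *			count &= 7;
 *		}
 *		if (count >= 4 && !((unsigned long)dst & 3)) {
 *			movnti_4b(dst, src);		// single 4-byte movnti
 *			dst += 4;
 *			src += 4;
 *			count -= 4;
 *		}
 *		while (count--)				// cached byte tail
 *			*dst++ = *src++;
 *		sfence();
 *		return 0;
 *	}
 */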