usercopy_32.c

// SPDX-License-Identifier: GPL-2.0
/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <[email protected]>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/asm.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

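/*
 * Decide whether a plain "rep movsl" copy is acceptable.  For copies of
 * 64 bytes or more, fall back to the unrolled copy when source and
 * destination disagree in alignment with respect to movsl_mask.mask.
 */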
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))

/*
 * Zero Userspace
 */

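/*
 * __do_clear_user(): zero "size" bytes at the user address "addr".
 * "rep stosl" clears size/4 dwords, then "rep stosb" clears the remaining
 * size & 3 bytes, bracketed by STAC/CLAC for SMAP.  On a fault the
 * exception-table fixups leave the number of bytes still to be cleared in
 * %ecx, so "size" reports how much was not zeroed.
 */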
#define __do_clear_user(addr,size) \
do { \
	int __d0; \
	might_fault(); \
	__asm__ __volatile__( \
		ASM_STAC "\n" \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
		"2: " ASM_CLAC "\n" \
		_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %2) \
		_ASM_EXTABLE_UA(1b, 2b) \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)

/**
 * clear_user - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Return: number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

/**
 * __clear_user - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Return: number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

#ifdef CONFIG_X86_INTEL_USERCOPY
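/*
 * Unrolled copy to user space: each loop iteration moves 64 bytes through
 * %eax/%edx; the loads at labels 1 and 2 touch the data 32 and 64 bytes
 * ahead so it is in the cache before it is copied.  The sub-64-byte tail
 * is finished with "rep movsl" / "rep movsb".  Every access has an
 * exception-table entry pointing at label 100, so a fault drops out of
 * the copy and the remaining byte count is returned in "size".
 */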
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"1: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 3f\n"
		"2: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"3: movl 0(%4), %%eax\n"
		"4: movl 4(%4), %%edx\n"
		"5: movl %%eax, 0(%3)\n"
		"6: movl %%edx, 4(%3)\n"
		"7: movl 8(%4), %%eax\n"
		"8: movl 12(%4),%%edx\n"
		"9: movl %%eax, 8(%3)\n"
		"10: movl %%edx, 12(%3)\n"
		"11: movl 16(%4), %%eax\n"
		"12: movl 20(%4), %%edx\n"
		"13: movl %%eax, 16(%3)\n"
		"14: movl %%edx, 20(%3)\n"
		"15: movl 24(%4), %%eax\n"
		"16: movl 28(%4), %%edx\n"
		"17: movl %%eax, 24(%3)\n"
		"18: movl %%edx, 28(%3)\n"
		"19: movl 32(%4), %%eax\n"
		"20: movl 36(%4), %%edx\n"
		"21: movl %%eax, 32(%3)\n"
		"22: movl %%edx, 36(%3)\n"
		"23: movl 40(%4), %%eax\n"
		"24: movl 44(%4), %%edx\n"
		"25: movl %%eax, 40(%3)\n"
		"26: movl %%edx, 44(%3)\n"
		"27: movl 48(%4), %%eax\n"
		"28: movl 52(%4), %%edx\n"
		"29: movl %%eax, 48(%3)\n"
		"30: movl %%edx, 52(%3)\n"
		"31: movl 56(%4), %%eax\n"
		"32: movl 60(%4), %%edx\n"
		"33: movl %%eax, 56(%3)\n"
		"34: movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 1b\n"
		"35: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"99: rep; movsl\n"
		"36: movl %%eax, %0\n"
		"37: rep; movsb\n"
		"100:\n"
		_ASM_EXTABLE_UA(1b, 100b)
		_ASM_EXTABLE_UA(2b, 100b)
		_ASM_EXTABLE_UA(3b, 100b)
		_ASM_EXTABLE_UA(4b, 100b)
		_ASM_EXTABLE_UA(5b, 100b)
		_ASM_EXTABLE_UA(6b, 100b)
		_ASM_EXTABLE_UA(7b, 100b)
		_ASM_EXTABLE_UA(8b, 100b)
		_ASM_EXTABLE_UA(9b, 100b)
		_ASM_EXTABLE_UA(10b, 100b)
		_ASM_EXTABLE_UA(11b, 100b)
		_ASM_EXTABLE_UA(12b, 100b)
		_ASM_EXTABLE_UA(13b, 100b)
		_ASM_EXTABLE_UA(14b, 100b)
		_ASM_EXTABLE_UA(15b, 100b)
		_ASM_EXTABLE_UA(16b, 100b)
		_ASM_EXTABLE_UA(17b, 100b)
		_ASM_EXTABLE_UA(18b, 100b)
		_ASM_EXTABLE_UA(19b, 100b)
		_ASM_EXTABLE_UA(20b, 100b)
		_ASM_EXTABLE_UA(21b, 100b)
		_ASM_EXTABLE_UA(22b, 100b)
		_ASM_EXTABLE_UA(23b, 100b)
		_ASM_EXTABLE_UA(24b, 100b)
		_ASM_EXTABLE_UA(25b, 100b)
		_ASM_EXTABLE_UA(26b, 100b)
		_ASM_EXTABLE_UA(27b, 100b)
		_ASM_EXTABLE_UA(28b, 100b)
		_ASM_EXTABLE_UA(29b, 100b)
		_ASM_EXTABLE_UA(30b, 100b)
		_ASM_EXTABLE_UA(31b, 100b)
		_ASM_EXTABLE_UA(32b, 100b)
		_ASM_EXTABLE_UA(33b, 100b)
		_ASM_EXTABLE_UA(34b, 100b)
		_ASM_EXTABLE_UA(35b, 100b)
		_ASM_EXTABLE_UA(36b, 100b)
		_ASM_EXTABLE_UA(37b, 100b)
		_ASM_EXTABLE_TYPE_REG(99b, 100b, EX_TYPE_UCOPY_LEN4, %%eax)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

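/*
 * Same unrolled loop as __copy_user_intel(), but the destination is
 * written with non-temporal "movnti" stores so a large copy does not
 * evict useful data from the cache; the "sfence" after the main loop
 * fences the weakly-ordered stores before the tail is copied with
 * "rep movsl" / "rep movsb".  Faults land at label 8 with the remaining
 * byte count in "size".
 */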
static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		_ASM_EXTABLE_UA(0b, 8b)
		_ASM_EXTABLE_UA(1b, 8b)
		_ASM_EXTABLE_UA(2b, 8b)
		_ASM_EXTABLE_UA(21b, 8b)
		_ASM_EXTABLE_UA(3b, 8b)
		_ASM_EXTABLE_UA(31b, 8b)
		_ASM_EXTABLE_UA(4b, 8b)
		_ASM_EXTABLE_UA(41b, 8b)
		_ASM_EXTABLE_UA(10b, 8b)
		_ASM_EXTABLE_UA(51b, 8b)
		_ASM_EXTABLE_UA(11b, 8b)
		_ASM_EXTABLE_UA(61b, 8b)
		_ASM_EXTABLE_UA(12b, 8b)
		_ASM_EXTABLE_UA(71b, 8b)
		_ASM_EXTABLE_UA(13b, 8b)
		_ASM_EXTABLE_UA(81b, 8b)
		_ASM_EXTABLE_UA(14b, 8b)
		_ASM_EXTABLE_UA(91b, 8b)
		_ASM_EXTABLE_TYPE_REG(6b, 8b, EX_TYPE_UCOPY_LEN4, %%eax)
		_ASM_EXTABLE_UA(7b, 8b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

#else
/*
 * Leave these declared but undefined.  There should not be any
 * references to them.
 */
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
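/*
 * __copy_user(): for copies longer than 7 bytes, copy single bytes until
 * the destination is 8-byte aligned, then copy whole dwords with
 * "rep movsl" and the remaining 0-3 bytes with "rep movsb".  Short copies
 * go straight to the byte loop.  The exception-table fixups turn a fault
 * into the number of bytes left uncopied in "size".
 */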
#define __copy_user(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		_ASM_EXTABLE_TYPE_REG(4b, 2b, EX_TYPE_UCOPY_LEN1, %3) \
		_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %3) \
		_ASM_EXTABLE_UA(1b, 2b) \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)

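/*
 * Low-level copy: opens user access with __uaccess_begin_nospec(), picks
 * the plain string copy when movsl_is_ok() says the alignments are
 * compatible, otherwise the unrolled __copy_user_intel(), and returns the
 * number of bytes that were not copied.
 */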
unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
{
	__uaccess_begin_nospec();
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_user_ll);

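/*
 * Cache-avoiding copy from user space: for copies larger than 64 bytes on
 * CPUs with SSE2 (needed for movnti/sfence) use the non-temporal variant,
 * otherwise fall back to the generic copy.  The "_nozero" suffix means the
 * destination tail is not zeroed if the copy faults.
 */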
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
	__uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	__uaccess_end();
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);