uaccess_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */
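/*
 * These routines back the generic copy_from_user()/copy_to_user()
 * wrappers in <linux/uaccess.h>. Like those, they return the number of
 * bytes that could NOT be copied, so 0 means success.
 */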
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
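/*
 * The "raw" forms below do no access_ok() or might_fault() checking;
 * the generic wrappers in <linux/uaccess.h> take care of both before
 * calling these. The __force casts merely strip the __user
 * address-space annotation for the plain-pointer helper.
 */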
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
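/*
 * Cache-avoiding variants: __copy_user_nocache() uses non-temporal
 * stores so bulk copies don't evict useful cache lines; the flushcache
 * forms additionally flush the destination lines, which the pmem/DAX
 * code uses when writing to persistent memory.
 */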
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);
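/*
 * The explicit kasan_check_write() calls below tell KASAN about the
 * destination write, since the out-of-line asm helpers themselves are
 * not instrumented.
 */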
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
/*
 * Zero Userspace.
 */
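/*
 * Three out-of-line versions of the same user-memset, roughly an
 * open-coded store loop, "rep stosq" and "rep stosb"; __clear_user()
 * below patches in the best one for the CPU via alternatives.
 */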
__must_check unsigned long
clear_user_original(void __user *addr, unsigned long len);
__must_check unsigned long
clear_user_rep_good(void __user *addr, unsigned long len);
__must_check unsigned long
clear_user_erms(void __user *addr, unsigned long len);
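/*
 * Returns the number of bytes that could NOT be cleared (0 on success).
 * The caller is expected to have checked the range with access_ok()
 * already; stac()/clac() open and close the SMAP window around the
 * user access.
 */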
static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE_3("rep stosb",
			      "call clear_user_erms", ALT_NOT(X86_FEATURE_FSRM),
			      "call clear_user_rep_good", ALT_NOT(X86_FEATURE_ERMS),
			      "call clear_user_original", ALT_NOT(X86_FEATURE_REP_GOOD))
		"2:\n"
	       _ASM_EXTABLE_UA(1b, 2b)
	       : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
	       : "a" (0)
		/* rep_good clobbers %rdx */
	       : "rdx");

	clac();

	return size;
}
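/*
 * Checked variant: if access_ok() fails, nothing is cleared and the
 * full @n is returned.
 */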
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}

#endif /* _ASM_X86_UACCESS_64_H */