/* uaccess.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __ASM_GENERIC_UACCESS_H
  3. #define __ASM_GENERIC_UACCESS_H
  4. /*
  5. * User space memory access functions, these should work
  6. * on any machine that has kernel and user data in the same
  7. * address space, e.g. all NOMMU machines.
  8. */
  9. #include <linux/string.h>
  10. #include <asm-generic/access_ok.h>
  11. #ifdef CONFIG_UACCESS_MEMCPY
  12. #include <asm/unaligned.h>
  13. static __always_inline int
  14. __get_user_fn(size_t size, const void __user *from, void *to)
  15. {
  16. BUILD_BUG_ON(!__builtin_constant_p(size));
  17. switch (size) {
  18. case 1:
  19. *(u8 *)to = *((u8 __force *)from);
  20. return 0;
  21. case 2:
  22. *(u16 *)to = get_unaligned((u16 __force *)from);
  23. return 0;
  24. case 4:
  25. *(u32 *)to = get_unaligned((u32 __force *)from);
  26. return 0;
  27. case 8:
  28. *(u64 *)to = get_unaligned((u64 __force *)from);
  29. return 0;
  30. default:
  31. BUILD_BUG();
  32. return 0;
  33. }
  34. }
  35. #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
  36. static __always_inline int
  37. __put_user_fn(size_t size, void __user *to, void *from)
  38. {
  39. BUILD_BUG_ON(!__builtin_constant_p(size));
  40. switch (size) {
  41. case 1:
  42. *(u8 __force *)to = *(u8 *)from;
  43. return 0;
  44. case 2:
  45. put_unaligned(*(u16 *)from, (u16 __force *)to);
  46. return 0;
  47. case 4:
  48. put_unaligned(*(u32 *)from, (u32 __force *)to);
  49. return 0;
  50. case 8:
  51. put_unaligned(*(u64 *)from, (u64 __force *)to);
  52. return 0;
  53. default:
  54. BUILD_BUG();
  55. return 0;
  56. }
  57. }
  58. #define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
/*
 * Non-faulting kernel read: with a single shared address space this is
 * just an unaligned load, so err_label is never actually taken.  The
 * dead "goto" keeps the label referenced so the compiler does not warn
 * about an unused label at call sites.
 */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
	*((type *)dst) = get_unaligned((type *)(src)); \
	if (0) /* make sure the label looks used to the compiler */ \
		goto err_label; \
} while (0)
/*
 * Non-faulting kernel write: mirror of __get_kernel_nofault() above --
 * a plain unaligned store that cannot fail, with a dead "goto" to keep
 * err_label referenced for the compiler.
 */
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
	put_unaligned(*((type *)src), (type *)(dst)); \
	if (0) /* make sure the label looks used to the compiler */ \
		goto err_label; \
} while (0)
/*
 * raw_copy_from_user - copy a block of data from user space, no checks.
 *
 * Kernel and user share one address space (NOMMU), so the copy is a
 * plain memcpy() and cannot fault.  Returns the number of bytes NOT
 * copied, which is therefore always 0.
 */
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
/*
 * raw_copy_to_user - copy a block of data into user space, no checks.
 *
 * Mirror of raw_copy_from_user() above: a plain memcpy() that cannot
 * fault.  Returns the number of bytes NOT copied, i.e. always 0.
 */
static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
  83. #define INLINE_COPY_FROM_USER
  84. #define INLINE_COPY_TO_USER
  85. #endif /* CONFIG_UACCESS_MEMCPY */
  86. /*
  87. * These are the main single-value transfer routines. They automatically
  88. * use the right size if we just have the right pointer type.
  89. * This version just falls back to copy_{from,to}_user, which should
  90. * provide a fast-path for small values.
  91. */
/*
 * __put_user() - write a simple value to user space, WITHOUT an
 * access_ok() check (callers must have validated the pointer).
 * Only object sizes 1, 2, 4 and 8 are supported; any other size is a
 * build/link error via __put_user_bad().  The statement expression
 * evaluates to 0 on success or -EFAULT on failure.
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})
  111. #define put_user(x, ptr) \
  112. ({ \
  113. void __user *__p = (ptr); \
  114. might_fault(); \
  115. access_ok(__p, sizeof(*ptr)) ? \
  116. __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
  117. -EFAULT; \
  118. })
/*
 * Generic fallback for architectures that do not provide their own
 * __put_user_fn(): forward to raw_copy_to_user() and map any short
 * copy to -EFAULT.
 */
#ifndef __put_user_fn
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)
#endif
  126. extern int __put_user_bad(void) __attribute__((noreturn));
  127. #define __get_user(x, ptr) \
  128. ({ \
  129. int __gu_err = -EFAULT; \
  130. __chk_user_ptr(ptr); \
  131. switch (sizeof(*(ptr))) { \
  132. case 1: { \
  133. unsigned char __x = 0; \
  134. __gu_err = __get_user_fn(sizeof (*(ptr)), \
  135. ptr, &__x); \
  136. (x) = *(__force __typeof__(*(ptr)) *) &__x; \
  137. break; \
  138. }; \
  139. case 2: { \
  140. unsigned short __x = 0; \
  141. __gu_err = __get_user_fn(sizeof (*(ptr)), \
  142. ptr, &__x); \
  143. (x) = *(__force __typeof__(*(ptr)) *) &__x; \
  144. break; \
  145. }; \
  146. case 4: { \
  147. unsigned int __x = 0; \
  148. __gu_err = __get_user_fn(sizeof (*(ptr)), \
  149. ptr, &__x); \
  150. (x) = *(__force __typeof__(*(ptr)) *) &__x; \
  151. break; \
  152. }; \
  153. case 8: { \
  154. unsigned long long __x = 0; \
  155. __gu_err = __get_user_fn(sizeof (*(ptr)), \
  156. ptr, &__x); \
  157. (x) = *(__force __typeof__(*(ptr)) *) &__x; \
  158. break; \
  159. }; \
  160. default: \
  161. __get_user_bad(); \
  162. break; \
  163. } \
  164. __gu_err; \
  165. })
  166. #define get_user(x, ptr) \
  167. ({ \
  168. const void __user *__p = (ptr); \
  169. might_fault(); \
  170. access_ok(__p, sizeof(*ptr)) ? \
  171. __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
  172. ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
  173. })
/*
 * Generic fallback for architectures that do not provide their own
 * __get_user_fn(): forward to raw_copy_from_user() and map any short
 * copy to -EFAULT.
 */
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
#endif
  181. extern int __get_user_bad(void) __attribute__((noreturn));
  182. /*
  183. * Zero Userspace
  184. */
/*
 * __clear_user() - zero user memory without an access check.
 * With kernel and user in one address space this is a plain memset()
 * that cannot fail; returns the number of bytes not cleared, i.e. 0.
 * Architectures may override this with an optimized implementation.
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif
  193. static inline __must_check unsigned long
  194. clear_user(void __user *to, unsigned long n)
  195. {
  196. might_fault();
  197. if (!access_ok(to, n))
  198. return n;
  199. return __clear_user(to, n);
  200. }
  201. #include <asm/extable.h>
  202. __must_check long strncpy_from_user(char *dst, const char __user *src,
  203. long count);
  204. __must_check long strnlen_user(const char __user *src, long n);
  205. #endif /* __ASM_GENERIC_UACCESS_H */