uaccess_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/processor.h>

#include <asm-generic/access_ok.h>
/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 */

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
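
/*
 * Illustrative sketch (not part of this header; the function name is
 * hypothetical): callers do not normally use __range_not_ok() directly.
 * Generic code checks a user range with access_ok() from
 * <asm-generic/access_ok.h> before doing raw accesses, e.g.:
 *
 *	static long example_validate(void __user *uptr, unsigned long len)
 *	{
 *		if (!access_ok(uptr, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */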
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
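
/*
 * Illustrative sketch (not part of this header; function and variable
 * names are hypothetical): get_user()/put_user() return 0 on success or
 * -EFAULT on a faulting access, and size the transfer from the pointer
 * type, e.g.:
 *
 *	static long example_double_word(unsigned int __user *uptr)
 *	{
 *		unsigned int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		if (put_user(val * 2, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */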
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_kernel_nofault(dst, src, type, label) \
do { \
	type *addr = (type __force *)(dst); \
	type data = *(type *)src; \
	register int __pu_ret; \
	switch (sizeof(type)) { \
	case 1: __put_kernel_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_kernel_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_kernel_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_kernel_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	if (__pu_ret) \
		goto label; \
} while (0)
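
/*
 * Illustrative sketch (not part of this header; the caller name is
 * hypothetical): generic nofault code expands __put_kernel_nofault()
 * with a local error label, so a faulting store branches to that label
 * instead of oopsing:
 *
 *	static long example_poke(unsigned long *dst, unsigned long val)
 *	{
 *		__put_kernel_nofault(dst, &val, unsigned long, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */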
#define __put_kernel_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Put kernel asm, inline. */\n" \
	"1:\t" "st"#size " %1, [%2]\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))
#define __put_user_nocheck(data, addr, size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
	"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))
int __put_user_bad(void);

#define __get_kernel_nofault(dst, src, type, label) \
do { \
	type *addr = (type __force *)(src); \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (sizeof(type)) { \
	case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break; \
	default: \
		__gu_val = 0; \
		__gu_ret = __get_user_bad(); \
		break; \
	} \
	if (__gu_ret) \
		goto label; \
	*(type *)dst = (__force type) __gu_val; \
} while (0)
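
/*
 * Illustrative sketch (not part of this header; the caller name is
 * hypothetical): __get_kernel_nofault() is the load-side counterpart,
 * again taking a local error label:
 *
 *	static long example_peek(unsigned long *src, unsigned long *out)
 *	{
 *		__get_kernel_nofault(out, src, unsigned long, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */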
#define __get_kernel_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Get kernel asm, inline. */\n" \
	"1:\t" "ld"#size " [%2], %1\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

#define __get_user_nocheck(data, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
	default: \
		__gu_val = 0; \
		__gu_ret = __get_user_bad(); \
		break; \
	} \
	data = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
	"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))
int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

unsigned long __must_check raw_copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);
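
/*
 * Illustrative sketch (not part of this header; names are hypothetical):
 * drivers normally call the generic copy_from_user()/copy_to_user()
 * wrappers from <linux/uaccess.h>, which are built on the raw routines
 * declared above and return the number of bytes NOT copied:
 *
 *	static long example_copy(const void __user *ubuf, size_t len)
 *	{
 *		char kbuf[64];
 *
 *		if (len > sizeof(kbuf))
 *			return -EINVAL;
 *		if (copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */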
unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);
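
/*
 * Illustrative sketch (not part of this header; the function name is
 * hypothetical): clear_user() zeroes a user buffer and returns the
 * number of bytes that could not be cleared; strnlen_user() returns the
 * length of a user string including the terminating NUL, or 0 on fault:
 *
 *	static long example_clear(void __user *ubuf, unsigned long len)
 *	{
 *		if (clear_user(ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */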
struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */