/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner ([email protected]),
 *	       Martin Schwidefsky ([email protected])
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>

void debug_user_asce(int exit);

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}
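
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller of the key-checked copy wrappers above. The storage access key
 * passed in is used for the key-controlled protection check; as with
 * copy_from_user(), the return value is the number of bytes that could
 * not be copied (0 on success). example_copy_in_with_key() is not a real
 * kernel helper.
 */
static __always_inline unsigned long __must_check
example_copy_in_with_key(void *kbuf, const void __user *ubuf,
			 unsigned long len, unsigned long key)
{
	/* copy len bytes from user space, checked against the given access key */
	return copy_from_user_key(kbuf, ubuf, len, key);
}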
union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};

int __noreturn __put_user_bad(void);

#define __put_user_asm(to, from, size)	\
({	\
	union oac __oac_spec = {	\
		.oac1.as = PSW_BITS_AS_SECONDARY,	\
		.oac1.a = 1,	\
	};	\
	int __rc;	\
	\
	asm volatile(	\
		"	lr	0,%[spec]\n"	\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"	\
		"1:	xr	%[rc],%[rc]\n"	\
		"2:\n"	\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])	\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])	\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))	\
		: [_size] "d" (size), [_from] "Q" (*(from)),	\
		  [spec] "d" (__oac_spec.val)	\
		: "cc", "0");	\
	__rc;	\
})

static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}

int __noreturn __get_user_bad(void);

#define __get_user_asm(to, from, size)	\
({	\
	union oac __oac_spec = {	\
		.oac2.as = PSW_BITS_AS_SECONDARY,	\
		.oac2.a = 1,	\
	};	\
	int __rc;	\
	\
	asm volatile(	\
		"	lr	0,%[spec]\n"	\
		"0:	mvcos	0(%[_to]),%[_from],%[_size]\n"	\
		"1:	xr	%[rc],%[rc]\n"	\
		"2:\n"	\
		EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize])	\
		EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize])	\
		: [rc] "=&d" (__rc), "=Q" (*(to))	\
		: [_size] "d" (size), [_from] "Q" (*(from)),	\
		  [spec] "d" (__oac_spec.val), [_to] "a" (to),	\
		  [_ksize] "K" (size)	\
		: "cc", "0");	\
	__rc;	\
})

static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)	\
({	\
	__typeof__(*(ptr)) __x = (x);	\
	int __pu_err = -EFAULT;	\
	\
	__chk_user_ptr(ptr);	\
	switch (sizeof(*(ptr))) {	\
	case 1:	\
	case 2:	\
	case 4:	\
	case 8:	\
		__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		break;	\
	default:	\
		__put_user_bad();	\
		break;	\
	}	\
	__builtin_expect(__pu_err, 0);	\
})

#define put_user(x, ptr)	\
({	\
	might_fault();	\
	__put_user(x, ptr);	\
})

#define __get_user(x, ptr)	\
({	\
	int __gu_err = -EFAULT;	\
	\
	__chk_user_ptr(ptr);	\
	switch (sizeof(*(ptr))) {	\
	case 1: {	\
		unsigned char __x;	\
	\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		break;	\
	};	\
	case 2: {	\
		unsigned short __x;	\
	\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		break;	\
	};	\
	case 4: {	\
		unsigned int __x;	\
	\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		break;	\
	};	\
	case 8: {	\
		unsigned long __x;	\
	\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		break;	\
	};	\
	default:	\
		__get_user_bad();	\
		break;	\
	}	\
	__builtin_expect(__gu_err, 0);	\
})

#define get_user(x, ptr)	\
({	\
	might_fault();	\
	__get_user(x, ptr);	\
})
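
/*
 * Illustrative sketch, not part of the original header: hypothetical use of
 * get_user()/put_user(). The access size is derived from the pointer type,
 * and both macros return 0 on success or -EFAULT on a faulting access.
 * example_increment_user_int() is not a real kernel helper.
 */
static __always_inline int example_increment_user_int(int __user *uptr)
{
	int val;
	int rc;

	rc = get_user(val, uptr);	/* fetch *uptr into val */
	if (rc)
		return rc;
	return put_user(val + 1, uptr);	/* store the incremented value back */
}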
/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

void *s390_kernel_write(void *dst, const void *src, size_t size);

int __noreturn __put_kernel_bad(void);

#define __put_kernel_asm(val, to, insn)	\
({	\
	int __rc;	\
	\
	asm volatile(	\
		"0:	" insn "	%[_val],%[_to]\n"	\
		"1:	xr	%[rc],%[rc]\n"	\
		"2:\n"	\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])	\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])	\
		: [rc] "=d" (__rc), [_to] "+Q" (*(to))	\
		: [_val] "d" (val)	\
		: "cc");	\
	__rc;	\
})

#define __put_kernel_nofault(dst, src, type, err_label)	\
do {	\
	unsigned long __x = (unsigned long)(*((type *)(src)));	\
	int __pk_err;	\
	\
	switch (sizeof(type)) {	\
	case 1:	\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc");	\
		break;	\
	case 2:	\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth");	\
		break;	\
	case 4:	\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;	\
	case 8:	\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg");	\
		break;	\
	default:	\
		__pk_err = __put_kernel_bad();	\
		break;	\
	}	\
	if (unlikely(__pk_err))	\
		goto err_label;	\
} while (0)

int __noreturn __get_kernel_bad(void);

#define __get_kernel_asm(val, from, insn)	\
({	\
	int __rc;	\
	\
	asm volatile(	\
		"0:	" insn "	%[_val],%[_from]\n"	\
		"1:	xr	%[rc],%[rc]\n"	\
		"2:\n"	\
		EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val])	\
		EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val])	\
		: [rc] "=d" (__rc), [_val] "=d" (val)	\
		: [_from] "Q" (*(from))	\
		: "cc");	\
	__rc;	\
})

#define __get_kernel_nofault(dst, src, type, err_label)	\
do {	\
	int __gk_err;	\
	\
	switch (sizeof(type)) {	\
	case 1: {	\
		unsigned char __x;	\
	\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;	\
		break;	\
	};	\
	case 2: {	\
		unsigned short __x;	\
	\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;	\
		break;	\
	};	\
	case 4: {	\
		unsigned int __x;	\
	\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;	\
		break;	\
	};	\
	case 8: {	\
		unsigned long __x;	\
	\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;	\
		break;	\
	};	\
	default:	\
		__gk_err = __get_kernel_bad();	\
		break;	\
	}	\
	if (unlikely(__gk_err))	\
		goto err_label;	\
} while (0)
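
/*
 * Illustrative sketch, not part of the original header: the *_nofault
 * helpers above take a goto label for their error path and are normally
 * driven by the generic copy_{from,to}_kernel_nofault() loops. A
 * hypothetical direct use could look like this; example_peek_kernel_long()
 * is not a real kernel helper.
 */
static __always_inline long example_peek_kernel_long(unsigned long *dst,
						     const unsigned long *src)
{
	/* read *src into *dst without taking a fault; branch to Efault on error */
	__get_kernel_nofault(dst, src, unsigned long, Efault);
	return 0;
Efault:
	return -EFAULT;
}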

#endif /* __S390_UACCESS_H */