/* arch/mips/include/asm/uaccess.h */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
  7. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  8. * Copyright (C) 2007 Maciej W. Rozycki
  9. * Copyright (C) 2014, Imagination Technologies Ltd.
  10. */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

#ifdef CONFIG_32BIT

/*
 * 32-bit: user space ends at a fixed 2 GB boundary (KSEG0); exception
 * table entries are 32-bit words and the uaccess asm uses 32-bit
 * load-address/add mnemonics and registers $8/$9 as scratch.
 */
#define __UA_LIMIT	0x80000000UL
#define TASK_SIZE_MAX	KSEG0

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

/*
 * 64-bit: the user-space limit is a run-time value (__ua_limit, set up
 * elsewhere); exception table entries are doublewords and the asm uses
 * the 64-bit dla/daddu forms with $12/$13 as scratch.
 */
extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit
#define TASK_SIZE_MAX	XKSSEG

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

#include <asm-generic/access_ok.h>
  37. /*
  38. * put_user: - Write a simple value into user space.
  39. * @x: Value to copy to user space.
  40. * @ptr: Destination address, in user space.
  41. *
  42. * Context: User context only. This function may sleep if pagefaults are
  43. * enabled.
  44. *
  45. * This macro copies a single simple value from kernel space to user
  46. * space. It supports simple types like char and int, but not larger
  47. * data types like structures or arrays.
  48. *
  49. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  50. * to the result of dereferencing @ptr.
  51. *
  52. * Returns zero on success, or -EFAULT on error.
  53. */
  54. #define put_user(x, ptr) \
  55. ({ \
  56. __typeof__(*(ptr)) __user *__p = (ptr); \
  57. \
  58. might_fault(); \
  59. access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \
  60. })
  61. /*
  62. * get_user: - Get a simple variable from user space.
  63. * @x: Variable to store result.
  64. * @ptr: Source address, in user space.
  65. *
  66. * Context: User context only. This function may sleep if pagefaults are
  67. * enabled.
  68. *
  69. * This macro copies a single simple variable from user space to kernel
  70. * space. It supports simple types like char and int, but not larger
  71. * data types like structures or arrays.
  72. *
  73. * @ptr must have pointer-to-simple-variable type, and the result of
  74. * dereferencing @ptr must be assignable to @x without a cast.
  75. *
  76. * Returns zero on success, or -EFAULT on error.
  77. * On error, the variable @x is set to zero.
  78. */
  79. #define get_user(x, ptr) \
  80. ({ \
  81. const __typeof__(*(ptr)) __user *__p = (ptr); \
  82. \
  83. might_fault(); \
  84. access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \
  85. ((x) = 0, -EFAULT); \
  86. })
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 *
 * Implementation note: the __put_data_asm()/__PUT_DW() helpers reference
 * the locals __pu_err and __pu_val by name, so those names are part of
 * the contract between these macros.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__pu_ptr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(__pu_ptr);					\
	/* dispatch on access size; any other size is a build error */	\
	switch (sizeof(*__pu_ptr)) {					\
	case 1:								\
		__put_data_asm(user_sb, __pu_ptr);			\
		break;							\
	case 2:								\
		__put_data_asm(user_sh, __pu_ptr);			\
		break;							\
	case 4:								\
		__put_data_asm(user_sw, __pu_ptr);			\
		break;							\
	case 8:								\
		__PUT_DW(user_sd, __pu_ptr);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__pu_err;							\
})
/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Implementation note: the __get_data_asm()/__GET_DW() helpers reference
 * the local __gu_err by name and assign it unconditionally.
 */
#define __get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	int __gu_err = 0;						\
									\
	__chk_user_ptr(__gu_ptr);					\
	/* dispatch on access size; any other size is a build error */	\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__get_data_asm((x), user_lb, __gu_ptr);			\
		break;							\
	case 2:								\
		__get_data_asm((x), user_lh, __gu_ptr);			\
		break;							\
	case 4:								\
		__get_data_asm((x), user_lw, __gu_ptr);			\
		break;							\
	case 8:								\
		__GET_DW((x), user_ld, __gu_ptr);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__gu_err;							\
})
/*
 * __m() casts an address to a large dummy structure so the "o" (offsettable
 * memory) asm constraint covers the whole access rather than a single byte.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * 64-bit loads: one native load on 64-bit kernels, a register-pair
 * sequence (__get_data_asm_ll32) on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif
/*
 * __get_data_asm() - emit a single fault-handled load.
 * @val:  lvalue receiving the loaded value (via a long temporary, then
 *        cast to the pointee type of @addr).
 * @insn: asm-eva load macro (e.g. user_lb, user_lw, kernel_ld).
 * @addr: source pointer.
 *
 * The load at 1: is recorded in __ex_table; on a fault the fixup at 3:
 * sets __gu_err (declared by the caller) to -EFAULT, zeroes the
 * destination register and resumes at 2:.
 */
#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1: "insn("%1", "%3")" \n"					\
	"2: \n"								\
	" .insn \n"							\
	" .section .fixup,\"ax\" \n"					\
	"3: li %0, %4 \n"						\
	" move %1, $0 \n"						\
	" j 2b \n"							\
	" .previous \n"							\
	" .section __ex_table,\"a\" \n"					\
	" "__UA_ADDR "\t1b, 3b \n"					\
	" .previous \n"							\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
/*
 * Get a long long 64 using 32 bit registers: two 32-bit loads into an
 * even/odd register pair (%1 and %D1).  Either load may fault, so both
 * are listed in __ex_table; the shared fixup at 4: zeroes both halves
 * and sets __gu_err (declared by the caller) to -EFAULT.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	/* load as u64, then assign as the pointee type of @addr */	\
	union {								\
		unsigned long long l;					\
		__typeof__(*(addr)) t;					\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1: " insn("%1", "(%3)")" \n"					\
	"2: " insn("%D1", "4(%3)")" \n"					\
	"3: \n"								\
	" .insn \n"							\
	" .section .fixup,\"ax\" \n"					\
	"4: li %0, %4 \n"						\
	" move %1, $0 \n"						\
	" move %D1, $0 \n"						\
	" j 3b \n"							\
	" .previous \n"							\
	" .section __ex_table,\"a\" \n"					\
	" " __UA_ADDR " 1b, 4b \n"					\
	" " __UA_ADDR " 2b, 4b \n"					\
	" .previous \n"							\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
/*
 * __get_kernel_nofault() - read a value from kernel memory without
 * faulting; on an exception, jump to @err_label instead.  Uses the
 * kernel_* (non-EVA) load forms.  __gu_err needs no initializer: the
 * __get_data_asm()/__GET_DW() helpers assign it on every path.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gu_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_data_asm(*(type *)(dst), kernel_lb,		\
			       (__force type *)(src));			\
		break;							\
	case 2:								\
		__get_data_asm(*(type *)(dst), kernel_lh,		\
			       (__force type *)(src));			\
		break;							\
	case 4:								\
		__get_data_asm(*(type *)(dst), kernel_lw,		\
			       (__force type *)(src));			\
		break;							\
	case 8:								\
		__GET_DW(*(type *)(dst), kernel_ld,			\
			 (__force type *)(src));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__gu_err))						\
		goto err_label;						\
} while (0)
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron: a single native store on 64-bit
 * kernels, a register-pair store sequence on 32-bit ones.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif
/*
 * __put_data_asm() - emit a single fault-handled store.
 * @insn: asm-eva store macro (e.g. user_sb, kernel_sw).
 * @ptr:  destination pointer.
 *
 * Stores __pu_val (declared by the caller); the "Jr" constraint plus the
 * "%z2" operand let a constant zero be stored straight from $0.  On a
 * fault the fixup at 3: sets __pu_err to -EFAULT and resumes at 2:.
 */
#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1: "insn("%z2", "%3")" # __put_data_asm \n"			\
	"2: \n"								\
	" .insn \n"							\
	" .section .fixup,\"ax\" \n"					\
	"3: li %0, %4 \n"						\
	" j 2b \n"							\
	" .previous \n"							\
	" .section __ex_table,\"a\" \n"					\
	" " __UA_ADDR " 1b, 3b \n"					\
	" .previous \n"							\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
/*
 * __put_data_asm_ll32() - store a 64-bit value with 32-bit registers:
 * two 32-bit stores from an even/odd register pair (%2 and %D2).
 * Either store may fault, so both are in __ex_table; the fixup at 4:
 * sets __pu_err (declared by the caller) to -EFAULT and resumes at 3:.
 */
#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n"		\
	"2: "insn("%D2", "4(%3)")" \n"					\
	"3: \n"								\
	" .insn \n"							\
	" .section .fixup,\"ax\" \n"					\
	"4: li %0, %4 \n"						\
	" j 3b \n"							\
	" .previous \n"							\
	" .section __ex_table,\"a\" \n"					\
	" " __UA_ADDR " 1b, 4b \n"					\
	" " __UA_ADDR " 2b, 4b \n"					\
	" .previous"							\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}
  312. #define __put_kernel_nofault(dst, src, type, err_label) \
  313. do { \
  314. type __pu_val; \
  315. int __pu_err = 0; \
  316. \
  317. __pu_val = *(__force type *)(src); \
  318. switch (sizeof(type)) { \
  319. case 1: \
  320. __put_data_asm(kernel_sb, (type *)(dst)); \
  321. break; \
  322. case 2: \
  323. __put_data_asm(kernel_sh, (type *)(dst)); \
  324. break; \
  325. case 4: \
  326. __put_data_asm(kernel_sw, (type *)(dst)) \
  327. break; \
  328. case 8: \
  329. __PUT_DW(kernel_sd, (type *)(dst)); \
  330. break; \
  331. default: \
  332. BUILD_BUG(); \
  333. break; \
  334. } \
  335. if (unlikely(__pu_err)) \
  336. goto err_label; \
  337. } while (0)
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions when this header is used from a module: load the
 * target into $1 (at) and jalr through it instead of a direct jal.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif
/*
 * Extra register clobbered by the out-of-line copy/zero routines when
 * the DADDI errata workaround, or EVA with prefetch, is in effect;
 * otherwise name $0 so the clobber lists below stay well-formed without
 * sacrificing a register.
 */
#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif
/* Out-of-line copy routines; both return the number of bytes NOT copied. */
extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);

/*
 * raw_copy_from_user() - copy @n bytes from user @from to kernel @to.
 *
 * Arguments are pinned to the MIPS argument registers $4-$6 and
 * __raw_copy_from_user is called via __MODULE_JAL (modules may be out
 * of direct-jal range).  Returns the residual byte count, which the
 * callee leaves in $6.
 *
 * NOTE(review): the __UA_ADDU after the call loads $1 with from + len,
 * presumably for consumption by the copy routine's fault fixup —
 * confirm against arch/mips/lib/memcpy.S.
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	register void *__cu_to_r __asm__("$4");
	register const void __user *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = to;
	__cu_from_r = from;
	__cu_len_r = n;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		__MODULE_JAL(__raw_copy_from_user)
		".set\tnoat\n\t"
		__UA_ADDU "\t$1, %1, %2\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}
/*
 * raw_copy_to_user() - copy @n bytes from kernel @from to user @to.
 *
 * Same calling convention as raw_copy_from_user(): arguments pinned to
 * $4-$6, call via __MODULE_JAL, residual (uncopied) byte count returned
 * in $6.
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	register void __user *__cu_to_r __asm__("$4");
	register const void *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = (to);
	__cu_from_r = (from);
	__cu_len_r = (n);

	__asm__ __volatile__(
		__MODULE_JAL(__raw_copy_to_user)
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}
/* Tell the generic uaccess layer the raw copy routines above are inline. */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:	  Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 *
 * Calls the out-of-line __bzero with the address in $4, a zero fill
 * value in $5 and the size in $6; the residual count comes back in $6.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: bzero_clobbers);

	return res;
}
  435. #define clear_user(addr,n) \
  436. ({ \
  437. void __user * __cl_addr = (addr); \
  438. unsigned long __cl_size = (n); \
  439. if (__cl_size && access_ok(__cl_addr, __cl_size)) \
  440. __cl_size = __clear_user(__cl_addr, __cl_size); \
  441. __cl_size; \
  442. })
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space. This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 *
 * Calls the out-of-line __strncpy_from_user_asm with arguments in
 * $4-$6; the result comes back in $2.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (!access_ok(__from, __len))
		return -EFAULT;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 *
 * Only the first byte's accessibility is checked here; the out-of-line
 * __strnlen_user_asm (arguments in $4/$5, result in $2) handles faults
 * beyond that.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	if (!access_ok(s, 1))
		return 0;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
  510. #endif /* _ASM_UACCESS_H */