/* arch/x86/include/asm/uaccess.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_X86_UACCESS_H
  3. #define _ASM_X86_UACCESS_H
  4. /*
  5. * User space memory access functions
  6. */
  7. #include <linux/compiler.h>
  8. #include <linux/instrumented.h>
  9. #include <linux/kasan-checks.h>
  10. #include <linux/string.h>
  11. #include <asm/asm.h>
  12. #include <asm/page.h>
  13. #include <asm/smap.h>
  14. #include <asm/extable.h>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
/*
 * Warn (once) if a user access check runs in a context that cannot take a
 * page fault: not in task context and pagefaults not explicitly disabled.
 */
# define WARN_ON_IN_IRQ() \
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif
/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size) \
({ \
	WARN_ON_IN_IRQ(); \
	likely(__access_ok(addr, size)); \
})

/* Generic __access_ok() implementation used by the macro above. */
#include <asm-generic/access_ok.h>
/*
 * Out-of-line fixed-size user-fetch helpers with a special calling
 * convention; expanded via do_get_user_call(). The "_bad" variant is
 * deliberately undefined so an unsupported size fails at link time.
 */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

/* SMAP bracket: stac() opens a user-access window, clac() closes it. */
#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
/* As above, plus a speculation barrier (Spectre v1 mitigation). */
#define __uaccess_begin_nospec() \
({ \
	stac(); \
	barrier_nospec(); \
})
/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__( \
	__typefits(x,char, \
	  __typefits(x,short, \
	    __typefits(x,int, \
	      __typefits(x,long,0ULL)))))

/* Select '(unsigned type)0' if x fits in 'type', otherwise fall through. */
#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr) \
({ \
	int __ret_gu; \
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
	__chk_user_ptr(ptr); \
	/* %P4 pastes sizeof(*(ptr)) to pick __<fn>_{1,2,4,8} */ \
	asm volatile("call __" #fn "_%P4" \
		     : "=a" (__ret_gu), "=r" (__val_gu), \
		       ASM_CALL_CONSTRAINT \
		     : "0" (ptr), "i" (sizeof(*(ptr)))); \
	instrument_get_user(__val_gu); \
	(x) = (__force __typeof__(*(ptr))) __val_gu; \
	__builtin_expect(__ret_gu, 0); \
})
/**
 * get_user - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
#ifdef CONFIG_X86_32
/*
 * 32-bit: store a 64-bit value as two movl's (value in %eax:%edx via the
 * "A" constraint); a fault on either store jumps to @label.
 */
#define __put_user_goto_u64(x, addr, label) \
	asm_volatile_goto("\n" \
		     "1: movl %%eax,0(%1)\n" \
		     "2: movl %%edx,4(%1)\n" \
		     _ASM_EXTABLE_UA(1b, %l2) \
		     _ASM_EXTABLE_UA(2b, %l2) \
		     : : "A" (x), "r" (addr) \
		     : : label)
#else
/* 64-bit: a single quadword store suffices. */
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif
/* Deliberately undefined: link-time error for unsupported sizes. */
extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);
/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr) \
({ \
	int __ret_pu; \
	void __user *__ptr_pu; \
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX); \
	__typeof__(*(ptr)) __x = (x); /* eval x once */ \
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */ \
	__chk_user_ptr(__ptr); \
	__ptr_pu = __ptr; \
	__val_pu = __x; \
	/* %P[size] pastes sizeof(*(ptr)) to pick __<fn>_{1,2,4,8} */ \
	asm volatile("call __" #fn "_%P[size]" \
		     : "=c" (__ret_pu), \
		       ASM_CALL_CONSTRAINT \
		     : "0" (__ptr_pu), \
		       "r" (__val_pu), \
		       [size] "i" (sizeof(*(ptr))) \
		     : "ebx"); \
	instrument_put_user(__x, __ptr, sizeof(*(ptr))); \
	__builtin_expect(__ret_pu, 0); \
})
/**
 * put_user - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
/*
 * Size-dispatched inline user store; a fault jumps to @label.
 * An unsupported @size resolves to the undefined __put_user_bad().
 */
#define __put_user_size(x, ptr, size, label) \
do { \
	__typeof__(*(ptr)) __x = (x); /* eval x once */ \
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */ \
	__chk_user_ptr(__ptr); \
	switch (size) { \
	case 1: \
		__put_user_goto(__x, __ptr, "b", "iq", label); \
		break; \
	case 2: \
		__put_user_goto(__x, __ptr, "w", "ir", label); \
		break; \
	case 4: \
		__put_user_goto(__x, __ptr, "l", "ir", label); \
		break; \
	case 8: \
		__put_user_goto_u64(__x, __ptr, label); \
		break; \
	default: \
		__put_user_bad(); \
	} \
	instrument_put_user(__x, __ptr, size); \
} while (0)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#ifdef CONFIG_X86_32
/* 32-bit: fetch a 64-bit value as two 32-bit loads; faults go to @label. */
#define __get_user_asm_u64(x, ptr, label) do { \
	unsigned int __gu_low, __gu_high; \
	const unsigned int __user *__gu_ptr; \
	__gu_ptr = (const void __user *)(ptr); \
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label) \
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

/*
 * Size-dispatched inline user load (asm-goto-with-output flavor);
 * a fault jumps to @label. Byte loads go through a local unsigned char
 * so the "=q" constraint sees a byte-sized lvalue.
 */
#define __get_user_size(x, ptr, size, label) \
do { \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: { \
		unsigned char x_u8__; \
		__get_user_asm(x_u8__, ptr, "b", "=q", label); \
		(x) = x_u8__; \
		break; \
	} \
	case 2: \
		__get_user_asm(x, ptr, "w", "=r", label); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, "l", "=r", label); \
		break; \
	case 8: \
		__get_user_asm_u64(x, ptr, label); \
		break; \
	default: \
		(x) = __get_user_bad(); \
	} \
	instrument_get_user(x); \
} while (0)

/* Single user load with an extable entry redirecting faults to @label. */
#define __get_user_asm(x, addr, itype, ltype, label) \
	asm_volatile_goto("\n" \
		     "1: mov"itype" %[umem],%[output]\n" \
		     _ASM_EXTABLE_UA(1b, %l2) \
		     : [output] ltype(x) \
		     : [umem] "m" (__m(addr)) \
		     : : label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#ifdef CONFIG_X86_32
/*
 * 32-bit: fetch a 64-bit value into %eax:%edx ("=&A"); on a fault the
 * extable fixup writes -EFAULT into @retval and clears %eax:%edx.
 */
#define __get_user_asm_u64(x, ptr, retval) \
({ \
	__typeof__(ptr) __ptr = (ptr); \
	asm volatile("\n" \
		     "1: movl %[lowbits],%%eax\n" \
		     "2: movl %[highbits],%%edx\n" \
		     "3:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX_DX, \
					   %[errout]) \
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX_DX, \
					   %[errout]) \
		     : [errout] "=r" (retval), \
		       [output] "=&A"(x) \
		     : [lowbits] "m" (__m(__ptr)), \
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "0" (retval)); \
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q")
#endif

/*
 * Size-dispatched inline user load (error-code flavor); @retval is set
 * to 0 on success or -EFAULT by the extable fixup on a fault.
 */
#define __get_user_size(x, ptr, size, retval) \
do { \
	unsigned char x_u8__; \
	\
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__get_user_asm(x_u8__, ptr, retval, "b"); \
		(x) = x_u8__; \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "w"); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "l"); \
		break; \
	case 8: \
		__get_user_asm_u64(x, ptr, retval); \
		break; \
	default: \
		(x) = __get_user_bad(); \
	} \
} while (0)

/* Single user load; fault writes -EFAULT to @err and clears the output. */
#define __get_user_asm(x, addr, err, itype) \
	asm volatile("\n" \
		     "1: mov"itype" %[umem],%[output]\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX, \
					   %[errout]) \
		     : [errout] "=r" (err), \
		       [output] "=a" (x) \
		     : [umem] "m" (__m(addr)), \
		       "0" (err))
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
/*
 * User-space cmpxchg: evaluates to true on success; on compare failure the
 * observed value is written back to *@_pold; a fault jumps to @label.
 */
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
	bool success; \
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
	__typeof__(*(_ptr)) __old = *_old; \
	__typeof__(*(_ptr)) __new = (_new); \
	asm_volatile_goto("\n" \
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label]) \
		     : CC_OUT(z) (success), \
		       [ptr] "+m" (*_ptr), \
		       [old] "+a" (__old) \
		     : [new] ltype (__new) \
		     : "memory" \
		     : label); \
	if (unlikely(!success)) \
		*_old = __old; \
	likely(success); })

#ifdef CONFIG_X86_32
/* 32-bit 8-byte variant via CMPXCHG8B (%edx:%eax old, %ecx:%ebx new). */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
	bool success; \
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
	__typeof__(*(_ptr)) __old = *_old; \
	__typeof__(*(_ptr)) __new = (_new); \
	asm_volatile_goto("\n" \
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
		     _ASM_EXTABLE_UA(1b, %l[label]) \
		     : CC_OUT(z) (success), \
		       "+A" (__old), \
		       [ptr] "+m" (*_ptr) \
		     : "b" ((u32)__new), \
		       "c" ((u32)((u64)__new >> 32)) \
		     : "memory" \
		     : label); \
	if (unlikely(!success)) \
		*_old = __old; \
	likely(success); })
#endif // CONFIG_X86_32
#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
/*
 * Fallback without tied asm-goto outputs: a fault sets __err via the
 * extable fixup and we branch to @label manually afterwards.
 */
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
	int __err = 0; \
	bool success; \
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
	__typeof__(*(_ptr)) __old = *_old; \
	__typeof__(*(_ptr)) __new = (_new); \
	asm volatile("\n" \
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z) \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
					   %[errout]) \
		     : CC_OUT(z) (success), \
		       [errout] "+r" (__err), \
		       [ptr] "+m" (*_ptr), \
		       [old] "+a" (__old) \
		     : [new] ltype (__new) \
		     : "memory"); \
	if (unlikely(__err)) \
		goto label; \
	if (unlikely(!success)) \
		*_old = __old; \
	likely(success); })

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
	int __result; \
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
	__typeof__(*(_ptr)) __old = *_old; \
	__typeof__(*(_ptr)) __new = (_new); \
	asm volatile("\n" \
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
		     "mov $0, %[result]\n\t" \
		     "setz %b[result]\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
					   %[result]) \
		     : [result] "=q" (__result), \
		       "+A" (__old), \
		       [ptr] "+m" (*_ptr) \
		     : "b" ((u32)__new), \
		       "c" ((u32)((u64)__new >> 32)) \
		     : "memory", "cc"); \
	if (unlikely(__result < 0)) \
		goto label; \
	if (unlikely(!__result)) \
		*_old = __old; \
	likely(__result); })
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
/* View an arbitrary user address as a big memory operand for asm "m". */
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label) \
	asm_volatile_goto("\n" \
		"1: mov"itype" %0,%1\n" \
		_ASM_EXTABLE_UA(1b, %l2) \
		: : ltype(x), "m" (__m(addr)) \
		: : label)
/* NMI-safe user copy (no faulting allowed in NMI context). */
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
/* Machine-check-aware copies: return the number of bytes NOT copied. */
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
  501. /*
  502. * The "unsafe" user accesses aren't really "unsafe", but the naming
  503. * is a big fat warning: you have to not only do the access_ok()
  504. * checking before using them, but you have to surround them with the
  505. * user_access_begin/end() pair.
  506. */
  507. static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
  508. {
  509. if (unlikely(!access_ok(ptr,len)))
  510. return 0;
  511. __uaccess_begin_nospec();
  512. return 1;
  513. }
  514. #define user_access_begin(a,b) user_access_begin(a,b)
  515. #define user_access_end() __uaccess_end()
  516. #define user_access_save() smap_save()
  517. #define user_access_restore(x) smap_restore(x)
/* Store inside a user_access_begin()/end() section; fault jumps to @label. */
#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/* Load inside a user_access_begin()/end() section; fault jumps to @err_label. */
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/* Error-code flavor: __get_user_size() fills __gu_err, branch on it. */
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/* Deliberately undefined: link-time error for unsupported cmpxchg sizes. */
extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
/* 64-bit: the 8-byte case is just the generic helper with "q"/"r". */
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})
/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();	/* reached on both success and fault */	\
	__ret;								\
						})
/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
/* Copy as many whole 'type'-sized chunks as fit; fault jumps to @label. */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

/* Descending chunk sizes (u64..u8) copy the whole range with few stores. */
#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/* Non-faulting kernel-space read: reuse the user-access machinery with a
 * __force cast; a fault jumps to @err_label instead of oopsing. */
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* Non-faulting kernel-space write; fault jumps to @err_label. */
#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
  617. #endif /* _ASM_X86_UACCESS_H */