/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user() the destination (to) always points to kernel
 * memory, so no faults on store should happen.  Interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        unsigned long res;

        instrument_copy_from_user_before(to, from, n);
        check_object_size(to, n, false);
        res = raw_copy_from_user(to, from, n);
        instrument_copy_from_user_after(to, from, n, res);
        return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res;

        might_fault();
        instrument_copy_from_user_before(to, from, n);
        if (should_fail_usercopy())
                return n;
        check_object_size(to, n, false);
        res = raw_copy_from_user(to, from, n);
        instrument_copy_from_user_after(to, from, n, res);
        return res;
}
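/*
 * Example (a minimal sketch, not taken from any particular caller): because
 * __copy_from_user() does not zero-pad on a short copy, a caller that has
 * already done its own access_ok() check must treat any non-zero return as
 * a fault.  The struct and variable names below are hypothetical.
 *
 *      struct foo karg;
 *
 *      if (!access_ok(uarg, sizeof(karg)))
 *              return -EFAULT;
 *      if (__copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;  // karg may be partially written; never use it
 */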
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't end up taking a page fault and sleeping.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (should_fail_usercopy())
                return n;
        instrument_copy_to_user(to, from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        instrument_copy_to_user(to, from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}
#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        might_fault();
        if (!should_fail_usercopy() && likely(access_ok(from, n))) {
                instrument_copy_from_user_before(to, from, n);
                res = raw_copy_from_user(to, from, n);
                instrument_copy_from_user_after(to, from, n, res);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (check_copy_size(to, n, false))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (check_copy_size(from, n, true))
                n = _copy_to_user(to, from, n);
        return n;
}
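/*
 * Example (a minimal sketch, not part of this header's API): both
 * copy_from_user() and copy_to_user() return the number of bytes that could
 * NOT be copied, so the usual pattern in a syscall or ioctl handler is to
 * turn any non-zero return into -EFAULT.  The names uarg/karg below are
 * hypothetical.
 *
 *      struct foo karg;
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 *      // ... operate on karg ...
 *      if (copy_to_user(uarg, &karg, sizeof(karg)))
 *              return -EFAULT;
 */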
#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
        memcpy(dst, src, cnt);
        return 0;
}
#endif
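/*
 * Example (a hedged sketch): copy_mc_to_kernel() returns the number of bytes
 * left uncopied, so a caller that may read from poisoned memory (for example
 * a pmem read path) can report a shortened transfer instead of consuming the
 * machine check.  The variable names below are hypothetical.
 *
 *      unsigned long rem = copy_mc_to_kernel(buf, src_addr, len);
 *
 *      if (rem)
 *              return len - rem;       // only this much was read safely
 *      return len;
 */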
static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        pagefault_disabled_dec();
}
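/*
 * Example (a minimal sketch, not a canonical user): with page faults
 * disabled, user accesses never sleep; they simply fail, and the caller can
 * fall back to a path that is allowed to fault.  The fallback function name
 * below is hypothetical.
 *
 *      pagefault_disable();
 *      ret = __copy_from_user_inatomic(dst, uaddr, len);
 *      pagefault_enable();
 *
 *      if (ret)
 *              ret = slow_path_copy(dst, uaddr, len);  // may fault and sleep
 */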
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
        return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
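/*
 * Example (a hedged sketch of the usual arch fault-handler pattern): when the
 * fault handler is disabled, or there is no mm to fault against, the handler
 * must not sleep taking mmap locks and instead resolves the access via the
 * exception fixup path.  The no_context() helper name is hypothetical.
 *
 *      if (unlikely(faulthandler_disabled() || !current->mm)) {
 *              no_context(regs, addr);         // resolve via fixup table
 *              return;
 *      }
 */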
#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *                          granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
        return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);
/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
                      size_t usize)
{
        size_t size = min(ksize, usize);
        size_t rest = max(ksize, usize) - size;

        /* Double check if ksize is larger than a known object size. */
        if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
                return -E2BIG;

        /* Deal with trailing bytes. */
        if (usize < ksize) {
                memset(dst + size, 0, rest);
        } else if (usize > ksize) {
                int ret = check_zeroed_user(src + size, rest);
                if (ret <= 0)
                        return ret ?: -E2BIG;
        }
        /* Copy the interoperable parts of the struct. */
        if (copy_from_user(dst, src, size))
                return -EFAULT;
        return 0;
}
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
                size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
                long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
                long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
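/*
 * Example (a hedged sketch): the *_nofault() helpers never take the fault-in
 * slow path, so they suit contexts such as tracing callbacks where sleeping
 * or faulting is not allowed.  They return 0 on success and -EFAULT on
 * failure.  The buffer and pointer names below are hypothetical.
 *
 *      char buf[64];
 *
 *      if (copy_from_user_nofault(buf, user_ptr, sizeof(buf)))
 *              return;         // address not resident or not accessible
 */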
#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)             \
do {                                                            \
        type __user *p = (type __force __user *)(src);          \
        type data;                                              \
        if (__get_user(data, p))                                \
                goto label;                                     \
        *(type *)dst = data;                                    \
} while (0)

#define __put_kernel_nofault(dst, src, type, label)             \
do {                                                            \
        type __user *p = (type __force __user *)(dst);          \
        type data = *(type *)src;                               \
        if (__put_user(data, p))                                \
                goto label;                                     \
} while (0)
#endif
/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({                         \
        const typeof(val) *__gk_ptr = (ptr);                    \
        copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
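/*
 * Example (a minimal sketch): get_kernel_nofault() is useful when chasing a
 * kernel pointer that may already be freed or never mapped, e.g. from a
 * debugging or crash-dump path.  The struct and pointer below are
 * hypothetical.
 *
 *      struct foo *p = possibly_stale_pointer;
 *      unsigned long flags;
 *
 *      if (get_kernel_nofault(flags, &p->flags))
 *              pr_warn("foo at %px is not readable\n", p);
 */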
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif

#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
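/*
 * Example (a hedged sketch of the usual pattern): user_access_begin() opens a
 * window in which the unsafe_*() accessors may be used without per-access
 * checks; the window must always be closed with user_access_end(), including
 * on the error path.  The variable names below are hypothetical.
 *
 *      if (!user_access_begin(uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      unsafe_get_user(val, &uptr->field, efault);
 *      unsafe_put_user(val + 1, &uptr->field, efault);
 *      user_access_end();
 *      return 0;
 * efault:
 *      user_access_end();
 *      return -EFAULT;
 */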
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
                               bool to_user, unsigned long offset,
                               unsigned long len);
#endif

#endif /* __LINUX_UACCESS_H__ */