
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
		void __user *ubuf;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};
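
/*
 * Which union members are valid is keyed by iter_type: iov (ITER_IOVEC),
 * kvec (ITER_KVEC) and bvec (ITER_BVEC) pair with nr_segs; xarray pairs
 * with xarray_start; pipe pairs with head/start_head (and last_offset);
 * ubuf stands alone.  ITER_DISCARD carries no payload at all.
 */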

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
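
/*
 * A typical save/restore pattern (a sketch, not code from this file):
 * snapshot the iterator, attempt a copy, and rewind on failure with
 * iov_iter_restore(), declared further down.
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	copied = copy_page_from_iter(page, offset, bytes, iter);
 *	if (copied != bytes)
 *		iov_iter_restore(iter, &state);	// back to the snapshot
 */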

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
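
/*
 * Example (sketch): summing a two-segment iovec whose lengths have
 * already been validated, e.g. by import_iovec():
 *
 *	struct iovec v[2] = {
 *		{ .iov_base = a, .iov_len = 16 },
 *		{ .iov_base = b, .iov_len = 32 },
 *	};
 *	size_t total = iov_length(v, 2);	// 48
 */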

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
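
/*
 * Usage sketch (not code from this file): copy_from_iter() may copy
 * fewer than @bytes and leaves the iterator advanced by the amount
 * copied; copy_from_iter_full() is all-or-nothing and rewinds the
 * iterator on a short copy:
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;		// iter left as it was
 */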

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
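
/*
 * Sketch of the check described above, for a hypothetical pmem-style
 * caller (not part of this API):
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// no flush-on-copy guarantee
 *	copied = _copy_from_iter_flushcache(addr, bytes, i);
 */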

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
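
/*
 * Constructor usage sketch: wrap a kernel buffer in a single-segment
 * kvec iterator that data will be copied *into* (ITER_DEST), then fill
 * it from some source buffer (buf, src and len are illustrative names):
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	copied = copy_to_iter(src, len, &iter);
 */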

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
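
/*
 * import_iovec() usage sketch, modelled on readv()-style callers: on
 * success *iovp points at the allocated array (or NULL if the fast
 * on-stack array was used), so an unconditional kfree() is safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs,
 *			   ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... consume iter ...
 *	kfree(iov);
 */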

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count
	};
}
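
/*
 * ITER_UBUF is the single-segment, user-backed fast path: semantically
 * equivalent to an ITER_IOVEC with exactly one iovec, but cheaper to
 * set up.  Sketch, reading *from* a userspace buffer:
 *
 *	iov_iter_ubuf(&iter, ITER_SOURCE, user_buf, len);
 *	copied = copy_from_iter(dst, len, &iter);
 */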

#endif /* __LINUX_UIO_H */