  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <crypto/hash.h>
  3. #include <linux/export.h>
  4. #include <linux/bvec.h>
  5. #include <linux/fault-inject-usercopy.h>
  6. #include <linux/uio.h>
  7. #include <linux/pagemap.h>
  8. #include <linux/highmem.h>
  9. #include <linux/slab.h>
  10. #include <linux/vmalloc.h>
  11. #include <linux/splice.h>
  12. #include <linux/compat.h>
  13. #include <net/checksum.h>
  14. #include <linux/scatterlist.h>
  15. #include <linux/instrumented.h>
  16. #define PIPE_PARANOIA /* for now */
  17. /* covers ubuf and kbuf alike */
  18. #define iterate_buf(i, n, base, len, off, __p, STEP) { \
  19. size_t __maybe_unused off = 0; \
  20. len = n; \
  21. base = __p + i->iov_offset; \
  22. len -= (STEP); \
  23. i->iov_offset += len; \
  24. n = len; \
  25. }
  26. /* covers iovec and kvec alike */
  27. #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
  28. size_t off = 0; \
  29. size_t skip = i->iov_offset; \
  30. do { \
  31. len = min(n, __p->iov_len - skip); \
  32. if (likely(len)) { \
  33. base = __p->iov_base + skip; \
  34. len -= (STEP); \
  35. off += len; \
  36. skip += len; \
  37. n -= len; \
  38. if (skip < __p->iov_len) \
  39. break; \
  40. } \
  41. __p++; \
  42. skip = 0; \
  43. } while (n); \
  44. i->iov_offset = skip; \
  45. n = off; \
  46. }
  47. #define iterate_bvec(i, n, base, len, off, p, STEP) { \
  48. size_t off = 0; \
  49. unsigned skip = i->iov_offset; \
  50. while (n) { \
  51. unsigned offset = p->bv_offset + skip; \
  52. unsigned left; \
  53. void *kaddr = kmap_local_page(p->bv_page + \
  54. offset / PAGE_SIZE); \
  55. base = kaddr + offset % PAGE_SIZE; \
  56. len = min(min(n, (size_t)(p->bv_len - skip)), \
  57. (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
  58. left = (STEP); \
  59. kunmap_local(kaddr); \
  60. len -= left; \
  61. off += len; \
  62. skip += len; \
  63. if (skip == p->bv_len) { \
  64. skip = 0; \
  65. p++; \
  66. } \
  67. n -= len; \
  68. if (left) \
  69. break; \
  70. } \
  71. i->iov_offset = skip; \
  72. n = off; \
  73. }
  74. #define iterate_xarray(i, n, base, len, __off, STEP) { \
  75. __label__ __out; \
  76. size_t __off = 0; \
  77. struct folio *folio; \
  78. loff_t start = i->xarray_start + i->iov_offset; \
  79. pgoff_t index = start / PAGE_SIZE; \
  80. XA_STATE(xas, i->xarray, index); \
  81. \
  82. len = PAGE_SIZE - offset_in_page(start); \
  83. rcu_read_lock(); \
  84. xas_for_each(&xas, folio, ULONG_MAX) { \
  85. unsigned left; \
  86. size_t offset; \
  87. if (xas_retry(&xas, folio)) \
  88. continue; \
  89. if (WARN_ON(xa_is_value(folio))) \
  90. break; \
  91. if (WARN_ON(folio_test_hugetlb(folio))) \
  92. break; \
  93. offset = offset_in_folio(folio, start + __off); \
  94. while (offset < folio_size(folio)) { \
  95. base = kmap_local_folio(folio, offset); \
  96. len = min(n, len); \
  97. left = (STEP); \
  98. kunmap_local(base); \
  99. len -= left; \
  100. __off += len; \
  101. n -= len; \
  102. if (left || n == 0) \
  103. goto __out; \
  104. offset += len; \
  105. len = PAGE_SIZE; \
  106. } \
  107. } \
  108. __out: \
  109. rcu_read_unlock(); \
  110. i->iov_offset += __off; \
  111. n = __off; \
  112. }
  113. #define __iterate_and_advance(i, n, base, len, off, I, K) { \
  114. if (unlikely(i->count < n)) \
  115. n = i->count; \
  116. if (likely(n)) { \
  117. if (likely(iter_is_ubuf(i))) { \
  118. void __user *base; \
  119. size_t len; \
  120. iterate_buf(i, n, base, len, off, \
  121. i->ubuf, (I)) \
  122. } else if (likely(iter_is_iovec(i))) { \
  123. const struct iovec *iov = i->iov; \
  124. void __user *base; \
  125. size_t len; \
  126. iterate_iovec(i, n, base, len, off, \
  127. iov, (I)) \
  128. i->nr_segs -= iov - i->iov; \
  129. i->iov = iov; \
  130. } else if (iov_iter_is_bvec(i)) { \
  131. const struct bio_vec *bvec = i->bvec; \
  132. void *base; \
  133. size_t len; \
  134. iterate_bvec(i, n, base, len, off, \
  135. bvec, (K)) \
  136. i->nr_segs -= bvec - i->bvec; \
  137. i->bvec = bvec; \
  138. } else if (iov_iter_is_kvec(i)) { \
  139. const struct kvec *kvec = i->kvec; \
  140. void *base; \
  141. size_t len; \
  142. iterate_iovec(i, n, base, len, off, \
  143. kvec, (K)) \
  144. i->nr_segs -= kvec - i->kvec; \
  145. i->kvec = kvec; \
  146. } else if (iov_iter_is_xarray(i)) { \
  147. void *base; \
  148. size_t len; \
  149. iterate_xarray(i, n, base, len, off, \
  150. (K)) \
  151. } \
  152. i->count -= n; \
  153. } \
  154. }
  155. #define iterate_and_advance(i, n, base, len, off, I, K) \
  156. __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
  157. static int copyout(void __user *to, const void *from, size_t n)
  158. {
  159. if (should_fail_usercopy())
  160. return n;
  161. if (access_ok(to, n)) {
  162. instrument_copy_to_user(to, from, n);
  163. n = raw_copy_to_user(to, from, n);
  164. }
  165. return n;
  166. }
  167. static int copyin(void *to, const void __user *from, size_t n)
  168. {
  169. size_t res = n;
  170. if (should_fail_usercopy())
  171. return n;
  172. if (access_ok(from, n)) {
  173. instrument_copy_from_user_before(to, from, n);
  174. res = raw_copy_from_user(to, from, n);
  175. instrument_copy_from_user_after(to, from, n, res);
  176. }
  177. return res;
  178. }
  179. static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
  180. unsigned int slot)
  181. {
  182. return &pipe->bufs[slot & (pipe->ring_size - 1)];
  183. }
  184. #ifdef PIPE_PARANOIA
  185. static bool sanity(const struct iov_iter *i)
  186. {
  187. struct pipe_inode_info *pipe = i->pipe;
  188. unsigned int p_head = pipe->head;
  189. unsigned int p_tail = pipe->tail;
  190. unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
  191. unsigned int i_head = i->head;
  192. unsigned int idx;
  193. if (i->last_offset) {
  194. struct pipe_buffer *p;
  195. if (unlikely(p_occupancy == 0))
  196. goto Bad; // pipe must be non-empty
  197. if (unlikely(i_head != p_head - 1))
  198. goto Bad; // must be at the last buffer...
  199. p = pipe_buf(pipe, i_head);
  200. if (unlikely(p->offset + p->len != abs(i->last_offset)))
  201. goto Bad; // ... at the end of segment
  202. } else {
  203. if (i_head != p_head)
  204. goto Bad; // must be right after the last buffer
  205. }
  206. return true;
  207. Bad:
  208. printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
  209. printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
  210. p_head, p_tail, pipe->ring_size);
  211. for (idx = 0; idx < pipe->ring_size; idx++)
  212. printk(KERN_ERR "[%p %p %d %d]\n",
  213. pipe->bufs[idx].ops,
  214. pipe->bufs[idx].page,
  215. pipe->bufs[idx].offset,
  216. pipe->bufs[idx].len);
  217. WARN_ON(1);
  218. return false;
  219. }
  220. #else
  221. #define sanity(i) true
  222. #endif
  223. static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
  224. {
  225. struct page *page = alloc_page(GFP_USER);
  226. if (page) {
  227. struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
  228. *buf = (struct pipe_buffer) {
  229. .ops = &default_pipe_buf_ops,
  230. .page = page,
  231. .offset = 0,
  232. .len = size
  233. };
  234. }
  235. return page;
  236. }
  237. static void push_page(struct pipe_inode_info *pipe, struct page *page,
  238. unsigned int offset, unsigned int size)
  239. {
  240. struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
  241. *buf = (struct pipe_buffer) {
  242. .ops = &page_cache_pipe_buf_ops,
  243. .page = page,
  244. .offset = offset,
  245. .len = size
  246. };
  247. get_page(page);
  248. }
  249. static inline int last_offset(const struct pipe_buffer *buf)
  250. {
  251. if (buf->ops == &default_pipe_buf_ops)
  252. return buf->len; // buf->offset is 0 for those
  253. else
  254. return -(buf->offset + buf->len);
  255. }
  256. static struct page *append_pipe(struct iov_iter *i, size_t size,
  257. unsigned int *off)
  258. {
  259. struct pipe_inode_info *pipe = i->pipe;
  260. int offset = i->last_offset;
  261. struct pipe_buffer *buf;
  262. struct page *page;
  263. if (offset > 0 && offset < PAGE_SIZE) {
  264. // some space in the last buffer; add to it
  265. buf = pipe_buf(pipe, pipe->head - 1);
  266. size = min_t(size_t, size, PAGE_SIZE - offset);
  267. buf->len += size;
  268. i->last_offset += size;
  269. i->count -= size;
  270. *off = offset;
  271. return buf->page;
  272. }
  273. // OK, we need a new buffer
  274. *off = 0;
  275. size = min_t(size_t, size, PAGE_SIZE);
  276. if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
  277. return NULL;
  278. page = push_anon(pipe, size);
  279. if (!page)
  280. return NULL;
  281. i->head = pipe->head - 1;
  282. i->last_offset = size;
  283. i->count -= size;
  284. return page;
  285. }
  286. static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
  287. struct iov_iter *i)
  288. {
  289. struct pipe_inode_info *pipe = i->pipe;
  290. unsigned int head = pipe->head;
  291. if (unlikely(bytes > i->count))
  292. bytes = i->count;
  293. if (unlikely(!bytes))
  294. return 0;
  295. if (!sanity(i))
  296. return 0;
  297. if (offset && i->last_offset == -offset) { // could we merge it?
  298. struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
  299. if (buf->page == page) {
  300. buf->len += bytes;
  301. i->last_offset -= bytes;
  302. i->count -= bytes;
  303. return bytes;
  304. }
  305. }
  306. if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
  307. return 0;
  308. push_page(pipe, page, offset, bytes);
  309. i->last_offset = -(offset + bytes);
  310. i->head = head;
  311. i->count -= bytes;
  312. return bytes;
  313. }
  314. /*
  315. * fault_in_iov_iter_readable - fault in iov iterator for reading
  316. * @i: iterator
  317. * @size: maximum length
  318. *
  319. * Fault in one or more iovecs of the given iov_iter, to a maximum length of
  320. * @size. For each iovec, fault in each page that constitutes the iovec.
  321. *
  322. * Returns the number of bytes not faulted in (like copy_to_user() and
  323. * copy_from_user()).
  324. *
  325. * Always returns 0 for non-userspace iterators.
  326. */
  327. size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
  328. {
  329. if (iter_is_ubuf(i)) {
  330. size_t n = min(size, iov_iter_count(i));
  331. n -= fault_in_readable(i->ubuf + i->iov_offset, n);
  332. return size - n;
  333. } else if (iter_is_iovec(i)) {
  334. size_t count = min(size, iov_iter_count(i));
  335. const struct iovec *p;
  336. size_t skip;
  337. size -= count;
  338. for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
  339. size_t len = min(count, p->iov_len - skip);
  340. size_t ret;
  341. if (unlikely(!len))
  342. continue;
  343. ret = fault_in_readable(p->iov_base + skip, len);
  344. count -= len - ret;
  345. if (ret)
  346. break;
  347. }
  348. return count + size;
  349. }
  350. return 0;
  351. }
  352. EXPORT_SYMBOL(fault_in_iov_iter_readable);
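/*
 * Illustrative usage sketch (not from the original file): buffered-write
 * style callers normally prefault the source iterator and retry short
 * copies.  example_copy_in() and its parameters are hypothetical; only
 * fault_in_iov_iter_readable() and copy_from_iter() are real helpers.
 */
static __maybe_unused ssize_t example_copy_in(void *dst, size_t len,
					      struct iov_iter *from)
{
	size_t done = 0;

	while (done < len) {
		size_t copied;

		if (fault_in_iov_iter_readable(from, len - done) == len - done)
			break;		/* nothing could be faulted in */
		copied = copy_from_iter(dst + done, len - done, from);
		done += copied;
		/* copied == 0 means we raced; fault in again and retry */
	}
	if (!done && len)
		return -EFAULT;
	return done;
}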
  353. /*
  354. * fault_in_iov_iter_writeable - fault in iov iterator for writing
  355. * @i: iterator
  356. * @size: maximum length
  357. *
  358. * Faults in the iterator using get_user_pages(), i.e., without triggering
  359. * hardware page faults. This is primarily useful when we already know that
  360. * some or all of the pages in @i aren't in memory.
  361. *
  362. * Returns the number of bytes not faulted in, like copy_to_user() and
  363. * copy_from_user().
  364. *
  365. * Always returns 0 for non-user-space iterators.
  366. */
  367. size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
  368. {
  369. if (iter_is_ubuf(i)) {
  370. size_t n = min(size, iov_iter_count(i));
  371. n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
  372. return size - n;
  373. } else if (iter_is_iovec(i)) {
  374. size_t count = min(size, iov_iter_count(i));
  375. const struct iovec *p;
  376. size_t skip;
  377. size -= count;
  378. for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
  379. size_t len = min(count, p->iov_len - skip);
  380. size_t ret;
  381. if (unlikely(!len))
  382. continue;
  383. ret = fault_in_safe_writeable(p->iov_base + skip, len);
  384. count -= len - ret;
  385. if (ret)
  386. break;
  387. }
  388. return count + size;
  389. }
  390. return 0;
  391. }
  392. EXPORT_SYMBOL(fault_in_iov_iter_writeable);
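/*
 * Illustrative sketch (not from the original file): the writeable variant
 * is typically paired with an attempt made while page faults are disabled;
 * when the copy comes up short, the destination is faulted in "by hand"
 * and the tail retried.  example_copy_out_nofault() is hypothetical.
 */
static __maybe_unused size_t example_copy_out_nofault(const void *src,
						      size_t len,
						      struct iov_iter *to)
{
	size_t total = 0;

	do {
		size_t copied;

		pagefault_disable();
		copied = copy_to_iter(src + total, len - total, to);
		pagefault_enable();
		total += copied;
		if (total == len)
			break;
		/* fault the rest of the destination in, then retry */
	} while (fault_in_iov_iter_writeable(to, len - total) != len - total);

	return total;
}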
  393. void iov_iter_init(struct iov_iter *i, unsigned int direction,
  394. const struct iovec *iov, unsigned long nr_segs,
  395. size_t count)
  396. {
  397. WARN_ON(direction & ~(READ | WRITE));
  398. *i = (struct iov_iter) {
  399. .iter_type = ITER_IOVEC,
  400. .nofault = false,
  401. .user_backed = true,
  402. .data_source = direction,
  403. .iov = iov,
  404. .nr_segs = nr_segs,
  405. .iov_offset = 0,
  406. .count = count
  407. };
  408. }
  409. EXPORT_SYMBOL(iov_iter_init);
  410. // returns the offset in partial buffer (if any)
  411. static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
  412. {
  413. struct pipe_inode_info *pipe = i->pipe;
  414. int used = pipe->head - pipe->tail;
  415. int off = i->last_offset;
  416. *npages = max((int)pipe->max_usage - used, 0);
  417. if (off > 0 && off < PAGE_SIZE) { // anon and not full
  418. (*npages)++;
  419. return off;
  420. }
  421. return 0;
  422. }
  423. static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
  424. struct iov_iter *i)
  425. {
  426. unsigned int off, chunk;
  427. if (unlikely(bytes > i->count))
  428. bytes = i->count;
  429. if (unlikely(!bytes))
  430. return 0;
  431. if (!sanity(i))
  432. return 0;
  433. for (size_t n = bytes; n; n -= chunk) {
  434. struct page *page = append_pipe(i, n, &off);
  435. chunk = min_t(size_t, n, PAGE_SIZE - off);
  436. if (!page)
  437. return bytes - n;
  438. memcpy_to_page(page, off, addr, chunk);
  439. addr += chunk;
  440. }
  441. return bytes;
  442. }
  443. static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
  444. __wsum sum, size_t off)
  445. {
  446. __wsum next = csum_partial_copy_nocheck(from, to, len);
  447. return csum_block_add(sum, next, off);
  448. }
  449. static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
  450. struct iov_iter *i, __wsum *sump)
  451. {
  452. __wsum sum = *sump;
  453. size_t off = 0;
  454. unsigned int chunk, r;
  455. if (unlikely(bytes > i->count))
  456. bytes = i->count;
  457. if (unlikely(!bytes))
  458. return 0;
  459. if (!sanity(i))
  460. return 0;
  461. while (bytes) {
  462. struct page *page = append_pipe(i, bytes, &r);
  463. char *p;
  464. if (!page)
  465. break;
  466. chunk = min_t(size_t, bytes, PAGE_SIZE - r);
  467. p = kmap_local_page(page);
  468. sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
  469. kunmap_local(p);
  470. off += chunk;
  471. bytes -= chunk;
  472. }
  473. *sump = sum;
  474. return off;
  475. }
  476. size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  477. {
  478. if (unlikely(iov_iter_is_pipe(i)))
  479. return copy_pipe_to_iter(addr, bytes, i);
  480. if (user_backed_iter(i))
  481. might_fault();
  482. iterate_and_advance(i, bytes, base, len, off,
  483. copyout(base, addr + off, len),
  484. memcpy(base, addr + off, len)
  485. )
  486. return bytes;
  487. }
  488. EXPORT_SYMBOL(_copy_to_iter);
  489. #ifdef CONFIG_ARCH_HAS_COPY_MC
  490. static int copyout_mc(void __user *to, const void *from, size_t n)
  491. {
  492. if (access_ok(to, n)) {
  493. instrument_copy_to_user(to, from, n);
  494. n = copy_mc_to_user((__force void *) to, from, n);
  495. }
  496. return n;
  497. }
  498. static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
  499. struct iov_iter *i)
  500. {
  501. size_t xfer = 0;
  502. unsigned int off, chunk;
  503. if (unlikely(bytes > i->count))
  504. bytes = i->count;
  505. if (unlikely(!bytes))
  506. return 0;
  507. if (!sanity(i))
  508. return 0;
  509. while (bytes) {
  510. struct page *page = append_pipe(i, bytes, &off);
  511. unsigned long rem;
  512. char *p;
  513. if (!page)
  514. break;
  515. chunk = min_t(size_t, bytes, PAGE_SIZE - off);
  516. p = kmap_local_page(page);
  517. rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
  518. chunk -= rem;
  519. kunmap_local(p);
  520. xfer += chunk;
  521. bytes -= chunk;
  522. if (rem) {
  523. iov_iter_revert(i, rem);
  524. break;
  525. }
  526. }
  527. return xfer;
  528. }
  529. /**
  530. * _copy_mc_to_iter - copy to iter with source memory error exception handling
  531. * @addr: source kernel address
  532. * @bytes: total transfer length
  533. * @i: destination iterator
  534. *
  535. * The pmem driver deploys this for the dax operation
  536. * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
  537. * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
  538. * successfully copied.
  539. *
  540. * The main differences between this and typical _copy_to_iter() are:
  541. *
  542. * * Typical tail/residue handling after a fault retries the copy
  543. * byte-by-byte until the fault happens again. Re-triggering machine
  544. * checks is potentially fatal so the implementation uses source
  545. * alignment and poison alignment assumptions to avoid re-triggering
  546. * hardware exceptions.
  547. *
  548. * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
  549. * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
  550. * a short copy.
  551. *
  552. * Return: number of bytes copied (may be %0)
  553. */
  554. size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  555. {
  556. if (unlikely(iov_iter_is_pipe(i)))
  557. return copy_mc_pipe_to_iter(addr, bytes, i);
  558. if (user_backed_iter(i))
  559. might_fault();
  560. __iterate_and_advance(i, bytes, base, len, off,
  561. copyout_mc(base, addr + off, len),
  562. copy_mc_to_kernel(base, addr + off, len)
  563. )
  564. return bytes;
  565. }
  566. EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
  567. #endif /* CONFIG_ARCH_HAS_COPY_MC */
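/*
 * Illustrative sketch (not from the original file) of the calling
 * convention described above: a dax-style read path treats a short return
 * from _copy_mc_to_iter() as poisoned source memory and reports -EIO when
 * nothing could be transferred.  example_mc_read() is hypothetical.
 */
static __maybe_unused ssize_t example_mc_read(const void *kaddr, size_t len,
					      struct iov_iter *to)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, to);

	if (!copied && len)
		return -EIO;	/* the very first chunk was poisoned */
	return copied;		/* possibly short: caller sees fewer bytes */
}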
  568. size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  569. {
  570. if (unlikely(iov_iter_is_pipe(i))) {
  571. WARN_ON(1);
  572. return 0;
  573. }
  574. if (user_backed_iter(i))
  575. might_fault();
  576. iterate_and_advance(i, bytes, base, len, off,
  577. copyin(addr + off, base, len),
  578. memcpy(addr + off, base, len)
  579. )
  580. return bytes;
  581. }
  582. EXPORT_SYMBOL(_copy_from_iter);
  583. size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  584. {
  585. if (unlikely(iov_iter_is_pipe(i))) {
  586. WARN_ON(1);
  587. return 0;
  588. }
  589. iterate_and_advance(i, bytes, base, len, off,
  590. __copy_from_user_inatomic_nocache(addr + off, base, len),
  591. memcpy(addr + off, base, len)
  592. )
  593. return bytes;
  594. }
  595. EXPORT_SYMBOL(_copy_from_iter_nocache);
  596. #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
  597. /**
  598. * _copy_from_iter_flushcache - write destination through cpu cache
  599. * @addr: destination kernel address
  600. * @bytes: total transfer length
  601. * @i: source iterator
  602. *
  603. * The pmem driver arranges for filesystem-dax to use this facility via
  604. * dax_copy_from_iter() for ensuring that writes to persistent memory
  605. * are flushed through the CPU cache. It is differentiated from
  606. * _copy_from_iter_nocache() in that it guarantees all data is flushed for
  607. * all iterator types. _copy_from_iter_nocache() only attempts to
  608. * bypass the cache for the ITER_IOVEC case, and on some archs may use
  609. * instructions that strand dirty-data in the cache.
  610. *
  611. * Return: number of bytes copied (may be %0)
  612. */
  613. size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
  614. {
  615. if (unlikely(iov_iter_is_pipe(i))) {
  616. WARN_ON(1);
  617. return 0;
  618. }
  619. iterate_and_advance(i, bytes, base, len, off,
  620. __copy_from_user_flushcache(addr + off, base, len),
  621. memcpy_flushcache(addr + off, base, len)
  622. )
  623. return bytes;
  624. }
  625. EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
  626. #endif
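/*
 * Illustrative sketch (not from the original file): the dax write path the
 * comment above refers to amounts to copying from the iterator straight to
 * the persistent-memory mapping with cache writeback guaranteed.  The
 * function name and @pmem_addr are hypothetical.
 */
static __maybe_unused size_t example_dax_write(void *pmem_addr, size_t len,
					       struct iov_iter *from)
{
	/* returns the number of bytes actually copied, as usual */
	return _copy_from_iter_flushcache(pmem_addr, len, from);
}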
  627. static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
  628. {
  629. struct page *head;
  630. size_t v = n + offset;
  631. /*
  632. * The general case needs to access the page order in order
  633. * to compute the page size.
  634. * However, we mostly deal with order-0 pages and thus can
  635. * avoid a possible cache line miss for requests that fit all
  636. * page orders.
  637. */
  638. if (n <= v && v <= PAGE_SIZE)
  639. return true;
  640. head = compound_head(page);
  641. v += (page - head) << PAGE_SHIFT;
  642. if (likely(n <= v && v <= (page_size(head))))
  643. return true;
  644. WARN_ON(1);
  645. return false;
  646. }
  647. size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
  648. struct iov_iter *i)
  649. {
  650. size_t res = 0;
  651. if (unlikely(!page_copy_sane(page, offset, bytes)))
  652. return 0;
  653. if (unlikely(iov_iter_is_pipe(i)))
  654. return copy_page_to_iter_pipe(page, offset, bytes, i);
  655. page += offset / PAGE_SIZE; // first subpage
  656. offset %= PAGE_SIZE;
  657. while (1) {
  658. void *kaddr = kmap_local_page(page);
  659. size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
  660. n = _copy_to_iter(kaddr + offset, n, i);
  661. kunmap_local(kaddr);
  662. res += n;
  663. bytes -= n;
  664. if (!bytes || !n)
  665. break;
  666. offset += n;
  667. if (offset == PAGE_SIZE) {
  668. page++;
  669. offset = 0;
  670. }
  671. }
  672. return res;
  673. }
  674. EXPORT_SYMBOL(copy_page_to_iter);
  675. size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
  676. struct iov_iter *i)
  677. {
  678. size_t res = 0;
  679. if (!page_copy_sane(page, offset, bytes))
  680. return 0;
  681. page += offset / PAGE_SIZE; // first subpage
  682. offset %= PAGE_SIZE;
  683. while (1) {
  684. void *kaddr = kmap_local_page(page);
  685. size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
  686. n = _copy_from_iter(kaddr + offset, n, i);
  687. kunmap_local(kaddr);
  688. res += n;
  689. bytes -= n;
  690. if (!bytes || !n)
  691. break;
  692. offset += n;
  693. if (offset == PAGE_SIZE) {
  694. page++;
  695. offset = 0;
  696. }
  697. }
  698. return res;
  699. }
  700. EXPORT_SYMBOL(copy_page_from_iter);
  701. static size_t pipe_zero(size_t bytes, struct iov_iter *i)
  702. {
  703. unsigned int chunk, off;
  704. if (unlikely(bytes > i->count))
  705. bytes = i->count;
  706. if (unlikely(!bytes))
  707. return 0;
  708. if (!sanity(i))
  709. return 0;
  710. for (size_t n = bytes; n; n -= chunk) {
  711. struct page *page = append_pipe(i, n, &off);
  712. char *p;
  713. if (!page)
  714. return bytes - n;
  715. chunk = min_t(size_t, n, PAGE_SIZE - off);
  716. p = kmap_local_page(page);
  717. memset(p + off, 0, chunk);
  718. kunmap_local(p);
  719. }
  720. return bytes;
  721. }
  722. size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
  723. {
  724. if (unlikely(iov_iter_is_pipe(i)))
  725. return pipe_zero(bytes, i);
  726. iterate_and_advance(i, bytes, base, len, count,
  727. clear_user(base, len),
  728. memset(base, 0, len)
  729. )
  730. return bytes;
  731. }
  732. EXPORT_SYMBOL(iov_iter_zero);
  733. size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
  734. struct iov_iter *i)
  735. {
  736. char *kaddr = kmap_atomic(page), *p = kaddr + offset;
  737. if (unlikely(!page_copy_sane(page, offset, bytes))) {
  738. kunmap_atomic(kaddr);
  739. return 0;
  740. }
  741. if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
  742. kunmap_atomic(kaddr);
  743. WARN_ON(1);
  744. return 0;
  745. }
  746. iterate_and_advance(i, bytes, base, len, off,
  747. copyin(p + off, base, len),
  748. memcpy(p + off, base, len)
  749. )
  750. kunmap_atomic(kaddr);
  751. return bytes;
  752. }
  753. EXPORT_SYMBOL(copy_page_from_iter_atomic);
  754. static void pipe_advance(struct iov_iter *i, size_t size)
  755. {
  756. struct pipe_inode_info *pipe = i->pipe;
  757. int off = i->last_offset;
  758. if (!off && !size) {
  759. pipe_discard_from(pipe, i->start_head); // discard everything
  760. return;
  761. }
  762. i->count -= size;
  763. while (1) {
  764. struct pipe_buffer *buf = pipe_buf(pipe, i->head);
  765. if (off) /* make it relative to the beginning of buffer */
  766. size += abs(off) - buf->offset;
  767. if (size <= buf->len) {
  768. buf->len = size;
  769. i->last_offset = last_offset(buf);
  770. break;
  771. }
  772. size -= buf->len;
  773. i->head++;
  774. off = 0;
  775. }
  776. pipe_discard_from(pipe, i->head + 1); // discard everything past this one
  777. }
  778. static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
  779. {
  780. const struct bio_vec *bvec, *end;
  781. if (!i->count)
  782. return;
  783. i->count -= size;
  784. size += i->iov_offset;
  785. for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
  786. if (likely(size < bvec->bv_len))
  787. break;
  788. size -= bvec->bv_len;
  789. }
  790. i->iov_offset = size;
  791. i->nr_segs -= bvec - i->bvec;
  792. i->bvec = bvec;
  793. }
  794. static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
  795. {
  796. const struct iovec *iov, *end;
  797. if (!i->count)
  798. return;
  799. i->count -= size;
  800. size += i->iov_offset; // from beginning of current segment
  801. for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
  802. if (likely(size < iov->iov_len))
  803. break;
  804. size -= iov->iov_len;
  805. }
  806. i->iov_offset = size;
  807. i->nr_segs -= iov - i->iov;
  808. i->iov = iov;
  809. }
  810. void iov_iter_advance(struct iov_iter *i, size_t size)
  811. {
  812. if (unlikely(i->count < size))
  813. size = i->count;
  814. if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
  815. i->iov_offset += size;
  816. i->count -= size;
  817. } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
  818. /* iovec and kvec have identical layouts */
  819. iov_iter_iovec_advance(i, size);
  820. } else if (iov_iter_is_bvec(i)) {
  821. iov_iter_bvec_advance(i, size);
  822. } else if (iov_iter_is_pipe(i)) {
  823. pipe_advance(i, size);
  824. } else if (iov_iter_is_discard(i)) {
  825. i->count -= size;
  826. }
  827. }
  828. EXPORT_SYMBOL(iov_iter_advance);
  829. void iov_iter_revert(struct iov_iter *i, size_t unroll)
  830. {
  831. if (!unroll)
  832. return;
  833. if (WARN_ON(unroll > MAX_RW_COUNT))
  834. return;
  835. i->count += unroll;
  836. if (unlikely(iov_iter_is_pipe(i))) {
  837. struct pipe_inode_info *pipe = i->pipe;
  838. unsigned int head = pipe->head;
  839. while (head > i->start_head) {
  840. struct pipe_buffer *b = pipe_buf(pipe, --head);
  841. if (unroll < b->len) {
  842. b->len -= unroll;
  843. i->last_offset = last_offset(b);
  844. i->head = head;
  845. return;
  846. }
  847. unroll -= b->len;
  848. pipe_buf_release(pipe, b);
  849. pipe->head--;
  850. }
  851. i->last_offset = 0;
  852. i->head = head;
  853. return;
  854. }
  855. if (unlikely(iov_iter_is_discard(i)))
  856. return;
  857. if (unroll <= i->iov_offset) {
  858. i->iov_offset -= unroll;
  859. return;
  860. }
  861. unroll -= i->iov_offset;
  862. if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
  863. BUG(); /* We should never go beyond the start of the specified
  864. * range since we might then be straying into pages that
  865. * aren't pinned.
  866. */
  867. } else if (iov_iter_is_bvec(i)) {
  868. const struct bio_vec *bvec = i->bvec;
  869. while (1) {
  870. size_t n = (--bvec)->bv_len;
  871. i->nr_segs++;
  872. if (unroll <= n) {
  873. i->bvec = bvec;
  874. i->iov_offset = n - unroll;
  875. return;
  876. }
  877. unroll -= n;
  878. }
  879. } else { /* same logics for iovec and kvec */
  880. const struct iovec *iov = i->iov;
  881. while (1) {
  882. size_t n = (--iov)->iov_len;
  883. i->nr_segs++;
  884. if (unroll <= n) {
  885. i->iov = iov;
  886. i->iov_offset = n - unroll;
  887. return;
  888. }
  889. unroll -= n;
  890. }
  891. }
  892. }
  893. EXPORT_SYMBOL(iov_iter_revert);
  894. /*
  895. * Return the count of just the current iov_iter segment.
  896. */
  897. size_t iov_iter_single_seg_count(const struct iov_iter *i)
  898. {
  899. if (i->nr_segs > 1) {
  900. if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  901. return min(i->count, i->iov->iov_len - i->iov_offset);
  902. if (iov_iter_is_bvec(i))
  903. return min(i->count, i->bvec->bv_len - i->iov_offset);
  904. }
  905. return i->count;
  906. }
  907. EXPORT_SYMBOL(iov_iter_single_seg_count);
  908. void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
  909. const struct kvec *kvec, unsigned long nr_segs,
  910. size_t count)
  911. {
  912. WARN_ON(direction & ~(READ | WRITE));
  913. *i = (struct iov_iter){
  914. .iter_type = ITER_KVEC,
  915. .data_source = direction,
  916. .kvec = kvec,
  917. .nr_segs = nr_segs,
  918. .iov_offset = 0,
  919. .count = count
  920. };
  921. }
  922. EXPORT_SYMBOL(iov_iter_kvec);
  923. void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
  924. const struct bio_vec *bvec, unsigned long nr_segs,
  925. size_t count)
  926. {
  927. WARN_ON(direction & ~(READ | WRITE));
  928. *i = (struct iov_iter){
  929. .iter_type = ITER_BVEC,
  930. .data_source = direction,
  931. .bvec = bvec,
  932. .nr_segs = nr_segs,
  933. .iov_offset = 0,
  934. .count = count
  935. };
  936. }
  937. EXPORT_SYMBOL(iov_iter_bvec);
  938. void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
  939. struct pipe_inode_info *pipe,
  940. size_t count)
  941. {
  942. BUG_ON(direction != READ);
  943. WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
  944. *i = (struct iov_iter){
  945. .iter_type = ITER_PIPE,
  946. .data_source = false,
  947. .pipe = pipe,
  948. .head = pipe->head,
  949. .start_head = pipe->head,
  950. .last_offset = 0,
  951. .count = count
  952. };
  953. }
  954. EXPORT_SYMBOL(iov_iter_pipe);
  955. /**
  956. * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
  957. * @i: The iterator to initialise.
  958. * @direction: The direction of the transfer.
  959. * @xarray: The xarray to access.
  960. * @start: The start file position.
  961. * @count: The size of the I/O buffer in bytes.
  962. *
  963. * Set up an I/O iterator to either draw data out of the pages attached to an
  964. * inode or to inject data into those pages. The pages *must* be prevented
  965. * from evaporation, either by taking a ref on them or locking them by the
  966. * from evaporating, either by the caller taking a ref on them or locking
  967. * them.
  968. void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
  969. struct xarray *xarray, loff_t start, size_t count)
  970. {
  971. BUG_ON(direction & ~1);
  972. *i = (struct iov_iter) {
  973. .iter_type = ITER_XARRAY,
  974. .data_source = direction,
  975. .xarray = xarray,
  976. .xarray_start = start,
  977. .count = count,
  978. .iov_offset = 0
  979. };
  980. }
  981. EXPORT_SYMBOL(iov_iter_xarray);
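/*
 * Illustrative sketch (not from the original file): a netfs-style read sets
 * up an xarray iterator over the inode's page cache so that received data
 * can be copied straight into the (already pinned/locked) pages.  The
 * function name is hypothetical.
 */
static __maybe_unused void example_iter_over_pagecache(struct address_space *mapping,
						       loff_t pos, size_t len,
						       struct iov_iter *iter)
{
	/* READ: the pages are the destination; copy_to_iter() fills them */
	iov_iter_xarray(iter, READ, &mapping->i_pages, pos, len);
}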
  982. /**
  983. * iov_iter_discard - Initialise an I/O iterator that discards data
  984. * @i: The iterator to initialise.
  985. * @direction: The direction of the transfer.
  986. * @count: The size of the I/O buffer in bytes.
  987. *
  988. * Set up an I/O iterator that just discards everything that's written to it.
  989. * It's only available as a READ iterator.
  990. */
  991. void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
  992. {
  993. BUG_ON(direction != READ);
  994. *i = (struct iov_iter){
  995. .iter_type = ITER_DISCARD,
  996. .data_source = false,
  997. .count = count,
  998. .iov_offset = 0
  999. };
  1000. }
  1001. EXPORT_SYMBOL(iov_iter_discard);
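/*
 * Illustrative sketch (not from the original file): a discard iterator is a
 * convenient sink for data that must be consumed but not kept, e.g. when
 * draining the unwanted tail of a message.  example_drop() is hypothetical.
 */
static __maybe_unused size_t example_drop(const void *buf, size_t bytes)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, bytes);
	/* advances the iterator and "copies" @bytes without storing them */
	return copy_to_iter(buf, bytes, &sink);
}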
  1002. static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
  1003. unsigned len_mask)
  1004. {
  1005. size_t size = i->count;
  1006. size_t skip = i->iov_offset;
  1007. unsigned k;
  1008. for (k = 0; k < i->nr_segs; k++, skip = 0) {
  1009. size_t len = i->iov[k].iov_len - skip;
  1010. if (len > size)
  1011. len = size;
  1012. if (len & len_mask)
  1013. return false;
  1014. if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
  1015. return false;
  1016. size -= len;
  1017. if (!size)
  1018. break;
  1019. }
  1020. return true;
  1021. }
  1022. static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
  1023. unsigned len_mask)
  1024. {
  1025. size_t size = i->count;
  1026. unsigned skip = i->iov_offset;
  1027. unsigned k;
  1028. for (k = 0; k < i->nr_segs; k++, skip = 0) {
  1029. size_t len = i->bvec[k].bv_len - skip;
  1030. if (len > size)
  1031. len = size;
  1032. if (len & len_mask)
  1033. return false;
  1034. if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
  1035. return false;
  1036. size -= len;
  1037. if (!size)
  1038. break;
  1039. }
  1040. return true;
  1041. }
  1042. /**
  1043. * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
  1044. * are aligned to the parameters.
  1045. *
  1046. * @i: &struct iov_iter to check
  1047. * @addr_mask: bit mask to check against the iov element's addresses
  1048. * @len_mask: bit mask to check against the iov element's lengths
  1049. *
  1050. * Return: false if any addresses or lengths intersect with the provided masks
  1051. */
  1052. bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
  1053. unsigned len_mask)
  1054. {
  1055. if (likely(iter_is_ubuf(i))) {
  1056. if (i->count & len_mask)
  1057. return false;
  1058. if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
  1059. return false;
  1060. return true;
  1061. }
  1062. if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  1063. return iov_iter_aligned_iovec(i, addr_mask, len_mask);
  1064. if (iov_iter_is_bvec(i))
  1065. return iov_iter_aligned_bvec(i, addr_mask, len_mask);
  1066. if (iov_iter_is_pipe(i)) {
  1067. size_t size = i->count;
  1068. if (size & len_mask)
  1069. return false;
  1070. if (size && i->last_offset > 0) {
  1071. if (i->last_offset & addr_mask)
  1072. return false;
  1073. }
  1074. return true;
  1075. }
  1076. if (iov_iter_is_xarray(i)) {
  1077. if (i->count & len_mask)
  1078. return false;
  1079. if ((i->xarray_start + i->iov_offset) & addr_mask)
  1080. return false;
  1081. }
  1082. return true;
  1083. }
  1084. EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
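/*
 * Illustrative sketch (not from the original file): direct-I/O code
 * typically uses iov_iter_is_aligned() with the device's logical block
 * size; @lbs is a hypothetical power-of-two block size in bytes.
 */
static __maybe_unused bool example_dio_aligned(const struct iov_iter *i,
					       unsigned int lbs)
{
	/* every segment address and length must be a multiple of @lbs */
	return iov_iter_is_aligned(i, lbs - 1, lbs - 1);
}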
  1085. static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
  1086. {
  1087. unsigned long res = 0;
  1088. size_t size = i->count;
  1089. size_t skip = i->iov_offset;
  1090. unsigned k;
  1091. for (k = 0; k < i->nr_segs; k++, skip = 0) {
  1092. size_t len = i->iov[k].iov_len - skip;
  1093. if (len) {
  1094. res |= (unsigned long)i->iov[k].iov_base + skip;
  1095. if (len > size)
  1096. len = size;
  1097. res |= len;
  1098. size -= len;
  1099. if (!size)
  1100. break;
  1101. }
  1102. }
  1103. return res;
  1104. }
  1105. static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
  1106. {
  1107. unsigned res = 0;
  1108. size_t size = i->count;
  1109. unsigned skip = i->iov_offset;
  1110. unsigned k;
  1111. for (k = 0; k < i->nr_segs; k++, skip = 0) {
  1112. size_t len = i->bvec[k].bv_len - skip;
  1113. res |= (unsigned long)i->bvec[k].bv_offset + skip;
  1114. if (len > size)
  1115. len = size;
  1116. res |= len;
  1117. size -= len;
  1118. if (!size)
  1119. break;
  1120. }
  1121. return res;
  1122. }
  1123. unsigned long iov_iter_alignment(const struct iov_iter *i)
  1124. {
  1125. if (likely(iter_is_ubuf(i))) {
  1126. size_t size = i->count;
  1127. if (size)
  1128. return ((unsigned long)i->ubuf + i->iov_offset) | size;
  1129. return 0;
  1130. }
  1131. /* iovec and kvec have identical layouts */
  1132. if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  1133. return iov_iter_alignment_iovec(i);
  1134. if (iov_iter_is_bvec(i))
  1135. return iov_iter_alignment_bvec(i);
  1136. if (iov_iter_is_pipe(i)) {
  1137. size_t size = i->count;
  1138. if (size && i->last_offset > 0)
  1139. return size | i->last_offset;
  1140. return size;
  1141. }
  1142. if (iov_iter_is_xarray(i))
  1143. return (i->xarray_start + i->iov_offset) | i->count;
  1144. return 0;
  1145. }
  1146. EXPORT_SYMBOL(iov_iter_alignment);
  1147. unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
  1148. {
  1149. unsigned long res = 0;
  1150. unsigned long v = 0;
  1151. size_t size = i->count;
  1152. unsigned k;
  1153. if (iter_is_ubuf(i))
  1154. return 0;
  1155. if (WARN_ON(!iter_is_iovec(i)))
  1156. return ~0U;
  1157. for (k = 0; k < i->nr_segs; k++) {
  1158. if (i->iov[k].iov_len) {
  1159. unsigned long base = (unsigned long)i->iov[k].iov_base;
  1160. if (v) // if not the first one
  1161. res |= base | v; // this start | previous end
  1162. v = base + i->iov[k].iov_len;
  1163. if (size <= i->iov[k].iov_len)
  1164. break;
  1165. size -= i->iov[k].iov_len;
  1166. }
  1167. }
  1168. return res;
  1169. }
  1170. EXPORT_SYMBOL(iov_iter_gap_alignment);
  1171. static int want_pages_array(struct page ***res, size_t size,
  1172. size_t start, unsigned int maxpages)
  1173. {
  1174. unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
  1175. if (count > maxpages)
  1176. count = maxpages;
  1177. WARN_ON(!count); // caller should've prevented that
  1178. if (!*res) {
  1179. *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
  1180. if (!*res)
  1181. return 0;
  1182. }
  1183. return count;
  1184. }
  1185. static ssize_t pipe_get_pages(struct iov_iter *i,
  1186. struct page ***pages, size_t maxsize, unsigned maxpages,
  1187. size_t *start)
  1188. {
  1189. unsigned int npages, count, off, chunk;
  1190. struct page **p;
  1191. size_t left;
  1192. if (!sanity(i))
  1193. return -EFAULT;
  1194. *start = off = pipe_npages(i, &npages);
  1195. if (!npages)
  1196. return -EFAULT;
  1197. count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
  1198. if (!count)
  1199. return -ENOMEM;
  1200. p = *pages;
  1201. for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
  1202. struct page *page = append_pipe(i, left, &off);
  1203. if (!page)
  1204. break;
  1205. chunk = min_t(size_t, left, PAGE_SIZE - off);
  1206. get_page(*p++ = page);
  1207. }
  1208. if (!npages)
  1209. return -EFAULT;
  1210. return maxsize - left;
  1211. }
  1212. static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
  1213. pgoff_t index, unsigned int nr_pages)
  1214. {
  1215. XA_STATE(xas, xa, index);
  1216. struct page *page;
  1217. unsigned int ret = 0;
  1218. rcu_read_lock();
  1219. for (page = xas_load(&xas); page; page = xas_next(&xas)) {
  1220. if (xas_retry(&xas, page))
  1221. continue;
  1222. /* Has the page moved or been split? */
  1223. if (unlikely(page != xas_reload(&xas))) {
  1224. xas_reset(&xas);
  1225. continue;
  1226. }
  1227. pages[ret] = find_subpage(page, xas.xa_index);
  1228. get_page(pages[ret]);
  1229. if (++ret == nr_pages)
  1230. break;
  1231. }
  1232. rcu_read_unlock();
  1233. return ret;
  1234. }
  1235. static ssize_t iter_xarray_get_pages(struct iov_iter *i,
  1236. struct page ***pages, size_t maxsize,
  1237. unsigned maxpages, size_t *_start_offset)
  1238. {
  1239. unsigned nr, offset, count;
  1240. pgoff_t index;
  1241. loff_t pos;
  1242. pos = i->xarray_start + i->iov_offset;
  1243. index = pos >> PAGE_SHIFT;
  1244. offset = pos & ~PAGE_MASK;
  1245. *_start_offset = offset;
  1246. count = want_pages_array(pages, maxsize, offset, maxpages);
  1247. if (!count)
  1248. return -ENOMEM;
  1249. nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
  1250. if (nr == 0)
  1251. return 0;
  1252. maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
  1253. i->iov_offset += maxsize;
  1254. i->count -= maxsize;
  1255. return maxsize;
  1256. }
  1257. /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
  1258. static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
  1259. {
  1260. size_t skip;
  1261. long k;
  1262. if (iter_is_ubuf(i))
  1263. return (unsigned long)i->ubuf + i->iov_offset;
  1264. for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
  1265. size_t len = i->iov[k].iov_len - skip;
  1266. if (unlikely(!len))
  1267. continue;
  1268. if (*size > len)
  1269. *size = len;
  1270. return (unsigned long)i->iov[k].iov_base + skip;
  1271. }
  1272. BUG(); // if it had been empty, we wouldn't get called
  1273. }
  1274. /* must be done on non-empty ITER_BVEC one */
  1275. static struct page *first_bvec_segment(const struct iov_iter *i,
  1276. size_t *size, size_t *start)
  1277. {
  1278. struct page *page;
  1279. size_t skip = i->iov_offset, len;
  1280. len = i->bvec->bv_len - skip;
  1281. if (*size > len)
  1282. *size = len;
  1283. skip += i->bvec->bv_offset;
  1284. page = i->bvec->bv_page + skip / PAGE_SIZE;
  1285. *start = skip % PAGE_SIZE;
  1286. return page;
  1287. }
  1288. static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
  1289. struct page ***pages, size_t maxsize,
  1290. unsigned int maxpages, size_t *start)
  1291. {
  1292. unsigned int n;
  1293. if (maxsize > i->count)
  1294. maxsize = i->count;
  1295. if (!maxsize)
  1296. return 0;
  1297. if (maxsize > MAX_RW_COUNT)
  1298. maxsize = MAX_RW_COUNT;
  1299. if (likely(user_backed_iter(i))) {
  1300. unsigned int gup_flags = 0;
  1301. unsigned long addr;
  1302. int res;
  1303. if (iov_iter_rw(i) != WRITE)
  1304. gup_flags |= FOLL_WRITE;
  1305. if (i->nofault)
  1306. gup_flags |= FOLL_NOFAULT;
  1307. addr = first_iovec_segment(i, &maxsize);
  1308. *start = addr % PAGE_SIZE;
  1309. addr &= PAGE_MASK;
  1310. n = want_pages_array(pages, maxsize, *start, maxpages);
  1311. if (!n)
  1312. return -ENOMEM;
  1313. res = get_user_pages_fast(addr, n, gup_flags, *pages);
  1314. if (unlikely(res <= 0))
  1315. return res;
  1316. maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
  1317. iov_iter_advance(i, maxsize);
  1318. return maxsize;
  1319. }
  1320. if (iov_iter_is_bvec(i)) {
  1321. struct page **p;
  1322. struct page *page;
  1323. page = first_bvec_segment(i, &maxsize, start);
  1324. n = want_pages_array(pages, maxsize, *start, maxpages);
  1325. if (!n)
  1326. return -ENOMEM;
  1327. p = *pages;
  1328. for (int k = 0; k < n; k++)
  1329. get_page(p[k] = page + k);
  1330. maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
  1331. i->count -= maxsize;
  1332. i->iov_offset += maxsize;
  1333. if (i->iov_offset == i->bvec->bv_len) {
  1334. i->iov_offset = 0;
  1335. i->bvec++;
  1336. i->nr_segs--;
  1337. }
  1338. return maxsize;
  1339. }
  1340. if (iov_iter_is_pipe(i))
  1341. return pipe_get_pages(i, pages, maxsize, maxpages, start);
  1342. if (iov_iter_is_xarray(i))
  1343. return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
  1344. return -EFAULT;
  1345. }
  1346. ssize_t iov_iter_get_pages2(struct iov_iter *i,
  1347. struct page **pages, size_t maxsize, unsigned maxpages,
  1348. size_t *start)
  1349. {
  1350. if (!maxpages)
  1351. return 0;
  1352. BUG_ON(!pages);
  1353. return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
  1354. }
  1355. EXPORT_SYMBOL(iov_iter_get_pages2);
  1356. ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
  1357. struct page ***pages, size_t maxsize,
  1358. size_t *start)
  1359. {
  1360. ssize_t len;
  1361. *pages = NULL;
  1362. len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
  1363. if (len <= 0) {
  1364. kvfree(*pages);
  1365. *pages = NULL;
  1366. }
  1367. return len;
  1368. }
  1369. EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
  1370. size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
  1371. struct iov_iter *i)
  1372. {
  1373. __wsum sum, next;
  1374. sum = *csum;
  1375. if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
  1376. WARN_ON(1);
  1377. return 0;
  1378. }
  1379. iterate_and_advance(i, bytes, base, len, off, ({
  1380. next = csum_and_copy_from_user(base, addr + off, len);
  1381. sum = csum_block_add(sum, next, off);
  1382. next ? 0 : len;
  1383. }), ({
  1384. sum = csum_and_memcpy(addr + off, base, len, sum, off);
  1385. })
  1386. )
  1387. *csum = sum;
  1388. return bytes;
  1389. }
  1390. EXPORT_SYMBOL(csum_and_copy_from_iter);
  1391. size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
  1392. struct iov_iter *i)
  1393. {
  1394. struct csum_state *csstate = _csstate;
  1395. __wsum sum, next;
  1396. if (unlikely(iov_iter_is_discard(i))) {
  1397. WARN_ON(1); /* for now */
  1398. return 0;
  1399. }
  1400. sum = csum_shift(csstate->csum, csstate->off);
  1401. if (unlikely(iov_iter_is_pipe(i)))
  1402. bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
  1403. else iterate_and_advance(i, bytes, base, len, off, ({
  1404. next = csum_and_copy_to_user(addr + off, base, len);
  1405. sum = csum_block_add(sum, next, off);
  1406. next ? 0 : len;
  1407. }), ({
  1408. sum = csum_and_memcpy(base, addr + off, len, sum, off);
  1409. })
  1410. )
  1411. csstate->csum = csum_shift(sum, csstate->off);
  1412. csstate->off += bytes;
  1413. return bytes;
  1414. }
  1415. EXPORT_SYMBOL(csum_and_copy_to_iter);
  1416. size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
  1417. struct iov_iter *i)
  1418. {
  1419. #ifdef CONFIG_CRYPTO_HASH
  1420. struct ahash_request *hash = hashp;
  1421. struct scatterlist sg;
  1422. size_t copied;
  1423. copied = copy_to_iter(addr, bytes, i);
  1424. sg_init_one(&sg, addr, copied);
  1425. ahash_request_set_crypt(hash, &sg, NULL, copied);
  1426. crypto_ahash_update(hash);
  1427. return copied;
  1428. #else
  1429. return 0;
  1430. #endif
  1431. }
  1432. EXPORT_SYMBOL(hash_and_copy_to_iter);
  1433. static int iov_npages(const struct iov_iter *i, int maxpages)
  1434. {
  1435. size_t skip = i->iov_offset, size = i->count;
  1436. const struct iovec *p;
  1437. int npages = 0;
  1438. for (p = i->iov; size; skip = 0, p++) {
  1439. unsigned offs = offset_in_page(p->iov_base + skip);
  1440. size_t len = min(p->iov_len - skip, size);
  1441. if (len) {
  1442. size -= len;
  1443. npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
  1444. if (unlikely(npages > maxpages))
  1445. return maxpages;
  1446. }
  1447. }
  1448. return npages;
  1449. }
  1450. static int bvec_npages(const struct iov_iter *i, int maxpages)
  1451. {
  1452. size_t skip = i->iov_offset, size = i->count;
  1453. const struct bio_vec *p;
  1454. int npages = 0;
  1455. for (p = i->bvec; size; skip = 0, p++) {
  1456. unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
  1457. size_t len = min(p->bv_len - skip, size);
  1458. size -= len;
  1459. npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
  1460. if (unlikely(npages > maxpages))
  1461. return maxpages;
  1462. }
  1463. return npages;
  1464. }
  1465. int iov_iter_npages(const struct iov_iter *i, int maxpages)
  1466. {
  1467. if (unlikely(!i->count))
  1468. return 0;
  1469. if (likely(iter_is_ubuf(i))) {
  1470. unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
  1471. int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
  1472. return min(npages, maxpages);
  1473. }
  1474. /* iovec and kvec have identical layouts */
  1475. if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  1476. return iov_npages(i, maxpages);
  1477. if (iov_iter_is_bvec(i))
  1478. return bvec_npages(i, maxpages);
  1479. if (iov_iter_is_pipe(i)) {
  1480. int npages;
  1481. if (!sanity(i))
  1482. return 0;
  1483. pipe_npages(i, &npages);
  1484. return min(npages, maxpages);
  1485. }
  1486. if (iov_iter_is_xarray(i)) {
  1487. unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
  1488. int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
  1489. return min(npages, maxpages);
  1490. }
  1491. return 0;
  1492. }
  1493. EXPORT_SYMBOL(iov_iter_npages);
  1494. const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
  1495. {
  1496. *new = *old;
  1497. if (unlikely(iov_iter_is_pipe(new))) {
  1498. WARN_ON(1);
  1499. return NULL;
  1500. }
  1501. if (iov_iter_is_bvec(new))
  1502. return new->bvec = kmemdup(new->bvec,
  1503. new->nr_segs * sizeof(struct bio_vec),
  1504. flags);
  1505. else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
  1506. /* iovec and kvec have identical layout */
  1507. return new->iov = kmemdup(new->iov,
  1508. new->nr_segs * sizeof(struct iovec),
  1509. flags);
  1510. return NULL;
  1511. }
  1512. EXPORT_SYMBOL(dup_iter);
  1513. static int copy_compat_iovec_from_user(struct iovec *iov,
  1514. const struct iovec __user *uvec, unsigned long nr_segs)
  1515. {
  1516. const struct compat_iovec __user *uiov =
  1517. (const struct compat_iovec __user *)uvec;
  1518. int ret = -EFAULT, i;
  1519. if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
  1520. return -EFAULT;
  1521. for (i = 0; i < nr_segs; i++) {
  1522. compat_uptr_t buf;
  1523. compat_ssize_t len;
  1524. unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
  1525. unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
  1526. /* check for compat_size_t not fitting in compat_ssize_t .. */
  1527. if (len < 0) {
  1528. ret = -EINVAL;
  1529. goto uaccess_end;
  1530. }
  1531. iov[i].iov_base = compat_ptr(buf);
  1532. iov[i].iov_len = len;
  1533. }
  1534. ret = 0;
  1535. uaccess_end:
  1536. user_access_end();
  1537. return ret;
  1538. }
  1539. static int copy_iovec_from_user(struct iovec *iov,
  1540. const struct iovec __user *uvec, unsigned long nr_segs)
  1541. {
  1542. unsigned long seg;
  1543. if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
  1544. return -EFAULT;
  1545. for (seg = 0; seg < nr_segs; seg++) {
  1546. if ((ssize_t)iov[seg].iov_len < 0)
  1547. return -EINVAL;
  1548. }
  1549. return 0;
  1550. }
  1551. struct iovec *iovec_from_user(const struct iovec __user *uvec,
  1552. unsigned long nr_segs, unsigned long fast_segs,
  1553. struct iovec *fast_iov, bool compat)
  1554. {
  1555. struct iovec *iov = fast_iov;
  1556. int ret;
  1557. /*
  1558. * SuS says "The readv() function *may* fail if the iovcnt argument was
  1559. * less than or equal to 0, or greater than {IOV_MAX}." Linux has
  1560. * traditionally returned zero for zero segments, so...
  1561. */
  1562. if (nr_segs == 0)
  1563. return iov;
  1564. if (nr_segs > UIO_MAXIOV)
  1565. return ERR_PTR(-EINVAL);
  1566. if (nr_segs > fast_segs) {
  1567. iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
  1568. if (!iov)
  1569. return ERR_PTR(-ENOMEM);
  1570. }
  1571. if (compat)
  1572. ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
  1573. else
  1574. ret = copy_iovec_from_user(iov, uvec, nr_segs);
  1575. if (ret) {
  1576. if (iov != fast_iov)
  1577. kfree(iov);
  1578. return ERR_PTR(ret);
  1579. }
  1580. return iov;
  1581. }
  1582. ssize_t __import_iovec(int type, const struct iovec __user *uvec,
  1583. unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
  1584. struct iov_iter *i, bool compat)
  1585. {
  1586. ssize_t total_len = 0;
  1587. unsigned long seg;
  1588. struct iovec *iov;
  1589. iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
  1590. if (IS_ERR(iov)) {
  1591. *iovp = NULL;
  1592. return PTR_ERR(iov);
  1593. }
  1594. /*
  1595. * According to the Single Unix Specification we should return EINVAL if
  1596. * an element length is < 0 when cast to ssize_t or if the total length
  1597. * would overflow the ssize_t return value of the system call.
  1598. *
  1599. * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
  1600. * overflow case.
  1601. */
  1602. for (seg = 0; seg < nr_segs; seg++) {
  1603. ssize_t len = (ssize_t)iov[seg].iov_len;
  1604. if (!access_ok(iov[seg].iov_base, len)) {
  1605. if (iov != *iovp)
  1606. kfree(iov);
  1607. *iovp = NULL;
  1608. return -EFAULT;
  1609. }
  1610. if (len > MAX_RW_COUNT - total_len) {
  1611. len = MAX_RW_COUNT - total_len;
  1612. iov[seg].iov_len = len;
  1613. }
  1614. total_len += len;
  1615. }
  1616. iov_iter_init(i, type, iov, nr_segs, total_len);
  1617. if (iov == *iovp)
  1618. *iovp = NULL;
  1619. else
  1620. *iovp = iov;
  1621. return total_len;
  1622. }
  1623. /**
  1624. * import_iovec() - Copy an array of &struct iovec from userspace
  1625. * into the kernel, check that it is valid, and initialize a new
  1626. * &struct iov_iter iterator to access it.
  1627. *
  1628. * @type: One of %READ or %WRITE.
  1629. * @uvec: Pointer to the userspace array.
  1630. * @nr_segs: Number of elements in userspace array.
  1631. * @fast_segs: Number of elements in *@iovp.
  1632. * @iovp: (input and output parameter) Pointer to pointer to (usually small
  1633. * on-stack) kernel array.
  1634. * @i: Pointer to iterator that will be initialized on success.
  1635. *
  1636. * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
  1637. * then this function places %NULL in *@iovp on return. Otherwise, a new
  1638. * array will be allocated and the result placed in *@iovp. This means that
  1639. * the caller may call kfree() on *@iovp regardless of whether the small
  1640. * on-stack array was used or not (and regardless of whether this function
  1641. * returns an error or not).
  1642. *
  1643. * Return: Negative error code on error, bytes imported on success
  1644. */
  1645. ssize_t import_iovec(int type, const struct iovec __user *uvec,
  1646. unsigned nr_segs, unsigned fast_segs,
  1647. struct iovec **iovp, struct iov_iter *i)
  1648. {
  1649. return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
  1650. in_compat_syscall());
  1651. }
  1652. EXPORT_SYMBOL(import_iovec);
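/*
 * Illustrative sketch (not from the original file): the usual import_iovec()
 * calling pattern in a readv()-style path.  The surrounding function is
 * hypothetical; note that kfree(iov) is safe whether or not the on-stack
 * array ended up being used, as documented above.
 */
static __maybe_unused ssize_t example_readv(const struct iovec __user *uvec,
					    unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... hand &iter to the actual I/O here; it covers @ret bytes ... */
	kfree(iov);
	return ret;
}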
  1653. int import_single_range(int rw, void __user *buf, size_t len,
  1654. struct iovec *iov, struct iov_iter *i)
  1655. {
  1656. if (len > MAX_RW_COUNT)
  1657. len = MAX_RW_COUNT;
  1658. if (unlikely(!access_ok(buf, len)))
  1659. return -EFAULT;
  1660. iov->iov_base = buf;
  1661. iov->iov_len = len;
  1662. iov_iter_init(i, rw, iov, 1, len);
  1663. return 0;
  1664. }
  1665. EXPORT_SYMBOL(import_single_range);
  1666. /**
  1667. * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
  1668. * iov_iter_save_state() was called.
  1669. *
  1670. * @i: &struct iov_iter to restore
  1671. * @state: state to restore from
  1672. *
  1673. * Used after iov_iter_save_state() to restore @i, if operations may
  1674. * have advanced it.
  1675. *
  1676. * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
  1677. */
  1678. void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
  1679. {
  1680. if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
  1681. !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
  1682. return;
  1683. i->iov_offset = state->iov_offset;
  1684. i->count = state->count;
  1685. if (iter_is_ubuf(i))
  1686. return;
  1687. /*
  1688. * For the *vec iters, nr_segs + iov is constant - if we increment
  1689. * the vec, then we also decrement the nr_segs count. Hence we don't
  1690. * need to track both of these, just one is enough and we can derive
  1691. * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
  1692. * size, so we can just increment the iov pointer as they are unionized.
  1693. * ITER_BVEC _may_ be the same size on some archs, but on others it is
  1694. * not. Be safe and handle it separately.
  1695. */
  1696. BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
  1697. if (iov_iter_is_bvec(i))
  1698. i->bvec -= state->nr_segs - i->nr_segs;
  1699. else
  1700. i->iov -= state->nr_segs - i->nr_segs;
  1701. i->nr_segs = state->nr_segs;
  1702. }
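/*
 * Illustrative sketch (not from the original file): iov_iter_save_state()
 * and iov_iter_restore() form a pair that lets a caller rewind an iterator
 * after an operation consumed part of it and needs to be retried.  The
 * function name and the 16-byte advance are hypothetical.
 */
static __maybe_unused void example_save_restore(struct iov_iter *i)
{
	struct iov_iter_state state;

	iov_iter_save_state(i, &state);
	/* something consumes part of the iterator ... */
	iov_iter_advance(i, min_t(size_t, iov_iter_count(i), 16));
	/* ... and the caller decides to retry from the saved position */
	iov_iter_restore(i, &state);
}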