// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

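/*
 * Allocate a bio_map_data together with space for a private copy of the
 * caller's iovec array, so the mapping state can outlive a short-lived
 * (e.g. on-stack) iovec.
 */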
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;
	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

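/*
 * Bounce-buffer path: build the bio from freshly allocated pages (or from
 * pages supplied via @map_data), copy in data from the iov_iter when needed,
 * and stash the bio_map_data in ->bi_private so bio_uncopy_user() can copy
 * back and clean up at completion.
 */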
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

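/* Free a bio allocated by blk_rq_map_bio_alloc(), matching its allocation path. */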
static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

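/*
 * Allocate a bio for mapping: use the per-cpu bio cache for polled requests
 * that fit in the inline vecs, otherwise fall back to a kmalloc'ed bio.
 */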
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_POLLED && (nr_vecs <= BIO_INLINE_VECS)) {
		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;

		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}

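/*
 * Zero-copy path: pin the user pages referenced by @iter and add them to a
 * bio within the queue's hardware limits.
 */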
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
						    nr_vecs, &offs);
		} else {
			bytes = iov_iter_get_pages_alloc2(iter, &pages,
							  LONG_MAX, &offs);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}

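/*
 * For bios built on a vmalloc buffer (->bi_private set) that are not writes,
 * invalidate the kernel vmap range on architectures that need it.
 */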
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

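/* Completion handler for bios built by bio_map_kern(): invalidate if needed, then free. */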
static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

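/* Free the bounce pages and the bio once a bio_copy_kern() I/O completes. */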
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

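/*
 * Read completion for bio_copy_kern(): copy the data back into the original
 * kernel buffer, then free the bounce pages and the bio.
 */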
static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			goto put_bio;
		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto put_bio;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!iter_is_iovec(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

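/* Map a single contiguous user buffer to a request; thin wrapper around blk_rq_map_user_iov(). */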
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

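/*
 * Map either a plain user buffer or a user iovec array (when @vec is set) to
 * a passthrough request, as used by SG_IO-style passthrough interfaces.
 */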
int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);