// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <[email protected]>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

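/*
 * Example (illustrative sketch; "sgl" stands for a caller-provided
 * scatterlist). Walking a possibly chained list with sg_next() by hand
 * is equivalent to what the for_each_sg() helper does:
 *
 *	struct scatterlist *sg;
 *	unsigned int total = 0;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		total += sg->length;
 */
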
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *		      needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking into account chaining as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

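/*
 * Example (illustrative sketch; "sgl" and "xfer_len" are placeholders).
 * A driver that must program a DMA engine with a fixed number of
 * descriptors can size that number from the byte count it intends to
 * transfer:
 *
 *	int nents = sg_nents_for_len(sgl, xfer_len);
 *
 *	if (nents < 0)
 *		return nents;	// list shorter than xfer_len
 */
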
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

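/*
 * Example (illustrative sketch; "cmd_buf" and CMD_LEN are placeholders).
 * The common single-buffer case needs exactly one entry:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, cmd_buf, CMD_LEN);
 *	// hand &sg (nents == 1) to e.g. dma_map_sg() or a crypto API
 */
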
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);

		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 * @num_ents:	Number of entries in the table
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn,
		     unsigned int num_ents)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (num_ents) {
		unsigned int alloc_size = num_ents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		num_ents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_append_table - Free a previously allocated append sg table.
 * @table:	 The mapped sg append table header
 *
 **/
void sg_free_append_table(struct sg_append_table *table)
{
	__sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
			table->total_nents);
}
EXPORT_SYMBOL(sg_free_append_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
			table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: Preallocated first scatterlist chunk to use, or NULL
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		sg_free_table(table);
	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

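/*
 * Example (illustrative sketch; "pages" and "npages" are placeholders).
 * Allocate a table, point each entry at one page, then free it when done:
 *
 *	struct sg_table sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	int err;
 *
 *	err = sg_alloc_table(&sgt, npages, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&sgt);
 */
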
static struct scatterlist *get_next_sg(struct sg_append_table *table,
				       struct scatterlist *cur,
				       unsigned long needed_sges,
				       gfp_t gfp_mask)
{
	struct scatterlist *new_sg, *next_sg;
	unsigned int alloc_size;

	if (cur) {
		next_sg = sg_next(cur);
		/* Check if last entry should be kept for chaining */
		if (!sg_is_last(next_sg) || needed_sges == 1)
			return next_sg;
	}

	alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
	new_sg = sg_kmalloc(alloc_size, gfp_mask);
	if (!new_sg)
		return ERR_PTR(-ENOMEM);
	sg_init_table(new_sg, alloc_size);
	if (cur) {
		table->total_nents += alloc_size - 1;
		__sg_chain(next_sg, new_sg);
	} else {
		table->sgt.sgl = new_sg;
		table->total_nents = alloc_size;
	}
	return new_sg;
}

/**
 * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
 *				      table from an array of pages
 * @sgt_append:  The sg append table to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @left_pages:  Number of pages the caller will append after this call
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    On the first call, allocates and initializes an sg table from a list of
 *    pages; on subsequent calls, reuses and extends the scatterlist from
 *    @sgt_append. Contiguous ranges of the pages are squashed into a single
 *    scatterlist entry up to the maximum size specified in @max_segment. A
 *    user may provide an offset at a start and a size of valid data in a
 *    buffer specified by the page array. The returned sg table is released
 *    by sg_free_append_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   sg_free_append_table() to clean up any leftover allocations.
 *
 *   In the first call, @sgt_append must be initialized.
 */
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
		struct page **pages, unsigned int n_pages, unsigned int offset,
		unsigned long size, unsigned int max_segment,
		unsigned int left_pages, gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
	unsigned int added_nents = 0;
	struct scatterlist *s = sgt_append->prv;

	/*
	 * The algorithm below requires max_segment to be aligned to PAGE_SIZE
	 * otherwise it can overshoot.
	 */
	max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
	if (WARN_ON(max_segment < PAGE_SIZE))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
		return -EOPNOTSUPP;

	if (sgt_append->prv) {
		unsigned long paddr =
			(page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
			 sgt_append->prv->offset + sgt_append->prv->length) /
			PAGE_SIZE;

		if (WARN_ON(offset))
			return -EINVAL;

		/* Merge contiguous pages into the last SG */
		prv_len = sgt_append->prv->length;
		while (n_pages && page_to_pfn(pages[0]) == paddr) {
			if (sgt_append->prv->length + PAGE_SIZE > max_segment)
				break;
			sgt_append->prv->length += PAGE_SIZE;
			paddr++;
			pages++;
			n_pages--;
		}
		if (!n_pages)
			goto out;
	}

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for (i = 0; i < chunks; i++) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		/* Pass how many chunks might be left */
		s = get_next_sg(sgt_append, s, chunks - i + left_pages,
				gfp_mask);
		if (IS_ERR(s)) {
			/*
			 * Adjust entry length to be as before function was
			 * called.
			 */
			if (sgt_append->prv)
				sgt_append->prv->length = prv_len;
			return PTR_ERR(s);
		}
		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		added_nents++;
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}
	sgt_append->sgt.nents += added_nents;
	sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
	sgt_append->prv = s;
out:
	if (!left_pages)
		sg_mark_end(s);
	return 0;
}
EXPORT_SYMBOL(sg_alloc_append_table_from_pages);

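/*
 * Example (illustrative sketch; "first_pages", "more_pages" and their
 * counts are placeholders). Appending two batches of pages to one table;
 * @left_pages is non-zero on the first call so the end marker is deferred:
 *
 *	struct sg_append_table app = {};
 *	int err;
 *
 *	err = sg_alloc_append_table_from_pages(&app, first_pages, n_first,
 *					       0, n_first * PAGE_SIZE,
 *					       UINT_MAX, n_more, GFP_KERNEL);
 *	if (!err)
 *		err = sg_alloc_append_table_from_pages(&app, more_pages,
 *						       n_more, 0,
 *						       n_more * PAGE_SIZE,
 *						       UINT_MAX, 0, GFP_KERNEL);
 *	if (err)
 *		sg_free_append_table(&app);
 */
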
/**
 * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
 *				       an array of pages and given maximum
 *				       segment.
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at a
 *    start and a size of valid data in a buffer specified by the page array.
 *
 *    The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	struct sg_append_table append = {};
	int err;

	err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
					       size, max_segment, 0, gfp_mask);
	if (err) {
		sg_free_append_table(&append);
		return err;
	}
	memcpy(sgt, &append.sgt, sizeof(*sgt));
	WARN_ON(append.total_nents != sgt->orig_nents);
	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);

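/*
 * Example (illustrative sketch; "pages", "n" and "seg_max" are
 * placeholders). Building a table straight from a pinned page array:
 *
 *	struct sg_table sgt;
 *	int err;
 *
 *	err = sg_alloc_table_from_pages_segment(&sgt, pages, n, 0,
 *						n * (unsigned long)PAGE_SIZE,
 *						seg_max, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	sg_free_table(&sgt);
 */
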
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    gfp & ~GFP_DMA);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free_order(sgl, order);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

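/*
 * Example (illustrative sketch). Allocate backing pages for a 1 MiB
 * buffer and release them again:
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	sgl_free(sgl);
 */
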
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

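/*
 * Example (illustrative sketch; "sgt" is a placeholder table). The
 * for_each_sg_page() helper drives __sg_page_iter_start()/_next()
 * internally, yielding one struct page per iteration even when a single
 * entry spans several pages:
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgt.sgl, &piter, sgt.orig_nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *		// operate on one page at a time
 *	}
 */
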
static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care.
 *
 * Returns:
 *   true if @miter contains a valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(!pagefault_disabled());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

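/*
 * Example (illustrative sketch; "sgl", "nents" and "process" are
 * placeholders). A typical read-side iteration; without SG_MITER_ATOMIC
 * the loop body may sleep:
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		process(miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */
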
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 * @to_buffer:	 transfer direction (true == from an sg list to a
 *		 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy to
 * @buflen:	 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

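/*
 * Example (illustrative sketch; "sgt" and HDR_LEN are placeholders).
 * Extracting a small header from the front of a scatterlist:
 *
 *	u8 hdr[HDR_LEN];
 *	size_t n;
 *
 *	n = sg_copy_to_buffer(sgt.sgl, sgt.orig_nents, hdr, sizeof(hdr));
 *	if (n != sizeof(hdr))
 *		return -EIO;
 */
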
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy to
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buflen:	 The number of bytes to zero out
 * @skip:	 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);
		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);