/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
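/*
 * Illustrative sketch (not part of this file): a caller can walk a possibly
 * chained list either with sg_next() directly or with the for_each_sg()
 * helper from <linux/scatterlist.h>. The function name below is
 * hypothetical.
 *
 *	static void example_dump_sgl(struct scatterlist *sgl, int nents)
 *	{
 *		struct scatterlist *sg;
 *		int i;
 *
 *		for_each_sg(sgl, sg, nents, i)
 *			pr_info("entry %d: len=%u off=%u\n",
 *				i, sg->length, sg->offset);
 *	}
 */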
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
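/*
 * Illustrative sketch (not part of this file): sg_init_one() is the common
 * way to hand a single kernel buffer to the DMA API. Everything here apart
 * from sg_init_one()/dma_map_sg() is hypothetical.
 *
 *	static int example_map_one(struct device *dev, void *buf, size_t len)
 *	{
 *		struct scatterlist sg;
 *
 *		sg_init_one(&sg, buf, len);
 *		if (!dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE))
 *			return -EIO;
 *		// ... start the transfer, then dma_unmap_sg() when done
 *		return 0;
 *	}
 */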
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL, if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g., failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
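/*
 * Illustrative sketch (not part of this file): typical sg_alloc_table()
 * usage is to allocate the table, populate each entry with sg_set_page(),
 * and release it later with sg_free_table(). The page source below is
 * hypothetical.
 *
 *	static int example_build_table(struct sg_table *sgt,
 *				       struct page **pages, unsigned int n)
 *	{
 *		struct scatterlist *sg;
 *		unsigned int i;
 *		int ret;
 *
 *		ret = sg_alloc_table(sgt, n, GFP_KERNEL);
 *		if (ret)
 *			return ret;
 *		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
 *			sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *		return 0;
 *	}
 */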
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			         an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset into
 *   the first page and a size of valid data in the buffer specified by the
 *   page array. The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset into the first page and a size of valid data in
 *   the buffer specified by the page array. The returned sg table is
 *   released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
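/*
 * Illustrative sketch (not part of this file): after pinning a user buffer
 * (e.g. with get_user_pages_fast()), the resulting page array can be turned
 * into an sg table and handed to the DMA API. All names below are
 * hypothetical.
 *
 *	static int example_map_user_pages(struct device *dev,
 *					  struct sg_table *sgt,
 *					  struct page **pages,
 *					  unsigned int n_pages,
 *					  unsigned int offset,
 *					  unsigned long nbytes)
 *	{
 *		int ret;
 *
 *		ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
 *						offset, nbytes, GFP_KERNEL);
 *		if (ret)
 *			return ret;
 *		if (!dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
 *				DMA_FROM_DEVICE)) {
 *			sg_free_table(sgt);
 *			return -EIO;
 *		}
 *		return 0;
 *	}
 */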
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);
/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   a page being freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);
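/*
 * Illustrative sketch (not part of this file): sgl_alloc() pairs with
 * sgl_free() for a scatterlist whose backing pages are owned by the list
 * itself. The buffer-filling step is hypothetical.
 *
 *	static int example_sgl_roundtrip(unsigned long long nbytes)
 *	{
 *		unsigned int nents;
 *		struct scatterlist *sgl;
 *
 *		sgl = sgl_alloc(nbytes, GFP_KERNEL, &nents);
 *		if (!sgl)
 *			return -ENOMEM;
 *		// ... fill the pages, e.g. via sg_copy_from_buffer()
 *		sgl_free(sgl);
 *		return 0;
 *	}
 */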
#endif /* CONFIG_SGL_ALLOC */
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
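/*
 * Illustrative sketch (not part of this file): the page iterator is rarely
 * driven by hand; callers normally use the for_each_sg_page() helper from
 * <linux/scatterlist.h>, which visits every page covered by the list.
 *
 *	static unsigned int example_count_pages(struct scatterlist *sgl,
 *						unsigned int nents)
 *	{
 *		struct sg_page_iter piter;
 *		unsigned int n = 0;
 *
 *		for_each_sg_page(sgl, &piter, nents, 0)
 *			n++;
 *		return n;
 *	}
 */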
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
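/*
 * Illustrative sketch (not part of this file): the canonical mapping
 * iterator loop. Each sg_miter_next() maps at most one page, and
 * sg_miter_stop() must run before leaving the (possibly atomic) context.
 * The checksum itself is hypothetical.
 *
 *	static u32 example_sum_bytes(struct scatterlist *sgl,
 *				     unsigned int nents)
 *	{
 *		struct sg_mapping_iter miter;
 *		u32 sum = 0;
 *		size_t i;
 *
 *		sg_miter_start(&miter, sgl, nents,
 *			       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *		while (sg_miter_next(&miter)) {
 *			for (i = 0; i < miter.length; i++)
 *				sum += ((u8 *)miter.addr)[i];
 *		}
 *		sg_miter_stop(&miter);
 *		return sum;
 *	}
 */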
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 * @to_buffer:	transfer direction (true == from an sg list to a
 *		buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
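/*
 * Illustrative sketch (not part of this file): staging a small header out of
 * an sg list into a linear buffer and patching it back. The header layout
 * and flag bit are hypothetical.
 *
 *	static int example_patch_header(struct scatterlist *sgl,
 *					unsigned int nents)
 *	{
 *		u8 hdr[16];
 *
 *		if (sg_copy_to_buffer(sgl, nents, hdr, sizeof(hdr)) !=
 *		    sizeof(hdr))
 *			return -EINVAL;
 *		hdr[0] |= 0x80;	// hypothetical flag bit
 *		sg_copy_from_buffer(sgl, nents, hdr, sizeof(hdr));
 *		return 0;
 *	}
 */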
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buflen:	The number of bytes to zero out
 * @skip:	Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);