#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->bvec; \
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
	if (likely(__v.bv_len)) { \
		__v.bv_page = __p->bv_page; \
		__v.bv_offset = __p->bv_offset + skip; \
		(void)(STEP); \
		skip += __v.bv_len; \
		n -= __v.bv_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.bv_len = min_t(size_t, n, __p->bv_len); \
		if (unlikely(!__v.bv_len)) \
			continue; \
		__v.bv_page = __p->bv_page; \
		__v.bv_offset = __p->bv_offset; \
		(void)(STEP); \
		skip = __v.bv_len; \
		n -= __v.bv_len; \
	} \
	n = wanted; \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
	size_t skip = i->iov_offset; \
	if (unlikely(i->type & ITER_BVEC)) { \
		const struct bio_vec *bvec; \
		struct bio_vec v; \
		iterate_bvec(i, n, v, bvec, skip, (B)) \
	} else if (unlikely(i->type & ITER_KVEC)) { \
		const struct kvec *kvec; \
		struct kvec v; \
		iterate_kvec(i, n, v, kvec, skip, (K)) \
	} else { \
		const struct iovec *iov; \
		struct iovec v; \
		iterate_iovec(i, n, v, iov, skip, (I)) \
	} \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
	size_t skip = i->iov_offset; \
	if (unlikely(i->type & ITER_BVEC)) { \
		const struct bio_vec *bvec; \
		struct bio_vec v; \
		iterate_bvec(i, n, v, bvec, skip, (B)) \
		if (skip == bvec->bv_len) { \
			bvec++; \
			skip = 0; \
		} \
		i->nr_segs -= bvec - i->bvec; \
		i->bvec = bvec; \
	} else if (unlikely(i->type & ITER_KVEC)) { \
		const struct kvec *kvec; \
		struct kvec v; \
		iterate_kvec(i, n, v, kvec, skip, (K)) \
		if (skip == kvec->iov_len) { \
			kvec++; \
			skip = 0; \
		} \
		i->nr_segs -= kvec - i->kvec; \
		i->kvec = kvec; \
	} else { \
		const struct iovec *iov; \
		struct iovec v; \
		iterate_iovec(i, n, v, iov, skip, (I)) \
		if (skip == iov->iov_len) { \
			iov++; \
			skip = 0; \
		} \
		i->nr_segs -= iov - i->iov; \
		i->iov = iov; \
	} \
	i->count -= n; \
	i->iov_offset = skip; \
}
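
/*
 * The three iterate_* helpers above walk the segment list and invoke STEP
 * once per contiguous chunk, with the chunk described by __v.  For the
 * user-space (iovec) case STEP must evaluate to the number of bytes it
 * failed to process, so a fault stops the walk early; for the kvec and
 * bvec cases STEP cannot fail and its value is discarded.
 * iterate_all_kinds() leaves the iterator itself untouched, while
 * iterate_and_advance() also updates count, iov_offset and the segment
 * pointers to consume whatever was processed.
 */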

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
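
/*
 * iov_iter_init - set up an iov_iter over a user-supplied iovec array.
 * @direction is the data direction (READ or WRITE).  When the caller is
 * running under KERNEL_DS the iovec array actually holds kernel pointers,
 * so the iterator is silently switched to ITER_KVEC.
 *
 * Illustrative (hypothetical) caller of the copy helpers below, e.g. a
 * read(2)-style path feeding one user buffer:
 *
 *	struct iov_iter iter;
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_page_to_iter(page, offset, len, &iter);
 *
 * ubuf, len, page, offset and copied are placeholders, not names defined
 * in this file.
 */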
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
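
/*
 * copy_to_iter()/copy_from_iter() move data between the kernel buffer at
 * @addr and the iterator, advancing the iterator as they go.  They return
 * the number of bytes actually copied, which may be less than @bytes if a
 * user-space segment faults part-way through.
 */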
size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *from = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);
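
/*
 * copy_page_to_iter()/copy_page_from_iter() are the page-based variants.
 * For kernel-space iterators (ITER_BVEC/ITER_KVEC) the page can simply be
 * kmapped atomically and handed to copy_to_iter()/copy_from_iter(); the
 * iovec case goes through the slower helpers above, which may have to fall
 * back to a sleeping kmap() when the user page is not resident.
 */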
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
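
/*
 * iov_iter_copy_from_user_atomic() is intended for callers that cannot
 * take page faults (e.g. the generic write path while it holds an atomic
 * kmap of the destination page).  It uses iterate_all_kinds() rather than
 * iterate_and_advance(), so the iterator itself is not advanced; the
 * caller advances it separately once it knows how much of the copy
 * actually succeeded.
 */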
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *iov, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = (struct kvec *)iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
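
/*
 * iov_iter_get_pages() pins up to @maxpages pages backing the first segment
 * of the iterator (via get_user_pages_fast() for user memory, or by taking
 * a reference on the bvec page).  On success it returns the number of bytes
 * covered by the pinned pages and stores the offset into the first page in
 * *start; kvec-backed iterators are not supported and yield -EFAULT.  The
 * iterator itself is not advanced.
 */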
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
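
/*
 * iov_iter_get_pages_alloc() behaves like iov_iter_get_pages() but
 * allocates the page array itself (kmalloc with a vmalloc fallback) and
 * returns it via *pages; the caller is expected to release the array with
 * kvfree() when it is done with the pages.
 */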
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
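
/*
 * csum_and_copy_from_iter()/csum_and_copy_to_iter() copy data while folding
 * it into the Internet checksum accumulated in *csum, using
 * csum_and_copy_from_user()/csum_and_copy_to_user() for user segments and
 * csum_partial_copy_nocheck() for kernel memory.  They advance the iterator
 * and return the number of bytes copied.
 */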
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	char *from = addr;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
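
/*
 * iov_iter_npages() returns the number of pages spanned by the remaining
 * data in the iterator, clamped to @maxpages.  Nothing is pinned or mapped;
 * it is only a sizing helper, typically used before allocating page arrays
 * or bio vectors.
 */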
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);