/* iov_iter.c */

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

/*
 * The iterate_* macros walk one segment type (user iovec, kernel kvec or
 * bio_vec page vector) and run STEP on each chunk.  iterate_all_kinds and
 * iterate_and_advance dispatch on the iterator type; the latter also
 * advances the iterator past the bytes that were processed.
 */
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->bvec; \
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
	if (likely(__v.bv_len)) { \
		__v.bv_page = __p->bv_page; \
		__v.bv_offset = __p->bv_offset + skip; \
		(void)(STEP); \
		skip += __v.bv_len; \
		n -= __v.bv_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.bv_len = min_t(size_t, n, __p->bv_len); \
		if (unlikely(!__v.bv_len)) \
			continue; \
		__v.bv_page = __p->bv_page; \
		__v.bv_offset = __p->bv_offset; \
		(void)(STEP); \
		skip = __v.bv_len; \
		n -= __v.bv_len; \
	} \
	n = wanted; \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
	size_t skip = i->iov_offset; \
	if (unlikely(i->type & ITER_BVEC)) { \
		const struct bio_vec *bvec; \
		struct bio_vec v; \
		iterate_bvec(i, n, v, bvec, skip, (B)) \
	} else if (unlikely(i->type & ITER_KVEC)) { \
		const struct kvec *kvec; \
		struct kvec v; \
		iterate_kvec(i, n, v, kvec, skip, (K)) \
	} else { \
		const struct iovec *iov; \
		struct iovec v; \
		iterate_iovec(i, n, v, iov, skip, (I)) \
	} \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
	size_t skip = i->iov_offset; \
	if (unlikely(i->type & ITER_BVEC)) { \
		const struct bio_vec *bvec; \
		struct bio_vec v; \
		iterate_bvec(i, n, v, bvec, skip, (B)) \
		if (skip == bvec->bv_len) { \
			bvec++; \
			skip = 0; \
		} \
		i->nr_segs -= bvec - i->bvec; \
		i->bvec = bvec; \
	} else if (unlikely(i->type & ITER_KVEC)) { \
		const struct kvec *kvec; \
		struct kvec v; \
		iterate_kvec(i, n, v, kvec, skip, (K)) \
		if (skip == kvec->iov_len) { \
			kvec++; \
			skip = 0; \
		} \
		i->nr_segs -= kvec - i->kvec; \
		i->kvec = kvec; \
	} else { \
		const struct iovec *iov; \
		struct iovec v; \
		iterate_iovec(i, n, v, iov, skip, (I)) \
		if (skip == iov->iov_len) { \
			iov++; \
			skip = 0; \
		} \
		i->nr_segs -= iov - i->iov; \
		i->iov = iov; \
	} \
	i->count -= n; \
	i->iov_offset = skip; \
}
/*
 * Copy data from a kernel page into the user iovecs described by the
 * iterator, trying an atomic kmap first and falling back to kmap() when
 * the destination pages need to be faulted in.
 */
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Copy data from the user iovecs described by the iterator into a kernel
 * page; same atomic-first / kmap() fallback structure as above.
 */
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
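/*
 * Illustrative sketch, not part of the original file: the usual caller
 * pattern is a buffered-write path (generic_perform_write() in mm/filemap.c
 * follows this shape) that prefaults the source iovec while no locks are
 * held, so that the atomic, non-faulting copy below rarely comes up short.
 * The function name below is hypothetical; @bytes is assumed <= PAGE_SIZE.
 */
#if 0	/* example only */
static size_t example_copy_user_data_to_page(struct page *page,
					     struct iov_iter *i, size_t bytes)
{
	size_t copied;

	/* Fault the user source pages in now, while no locks are held... */
	if (iov_iter_fault_in_readable(i, bytes))
		return 0;

	/* ...so that the atomic copy into the page rarely fails. */
	copied = iov_iter_copy_from_user_atomic(page, i, 0, bytes);
	iov_iter_advance(i, copied);
	return copied;
}
#endif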
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *from = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
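/*
 * Illustrative sketch, not part of the original file: pushing a kernel
 * buffer out to a user iovec with iov_iter_init() + copy_to_iter().  The
 * function name is hypothetical; the READ direction and the requirement
 * that the iovec array itself already live in kernel memory follow the
 * conventions of the callers in fs/read_write.c.
 */
#if 0	/* example only */
static size_t example_read_to_user(const struct iovec *kern_iov,
				   unsigned long nr, void *kbuf, size_t len)
{
	struct iov_iter iter;

	/*
	 * READ: the iterator is the destination of a read.  The iovec array
	 * must already be a kernel copy (see import_iovec() below); only its
	 * iov_base members point at user buffers.
	 */
	iov_iter_init(&iter, READ, kern_iov, nr, len);
	return copy_to_iter(kbuf, len, &iter);
}
#endif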
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
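/*
 * Illustrative sketch, not part of the original file: a read_iter-style
 * loop that fills the iterator from an array of (already looked-up) pages.
 * copy_page_to_iter() advances the iterator itself and may return less than
 * requested if a user page cannot be faulted in.  Function and parameter
 * names are hypothetical.
 */
#if 0	/* example only */
static ssize_t example_fill_from_pages(struct page **pages, unsigned nr_pages,
				       struct iov_iter *to)
{
	ssize_t done = 0;
	unsigned n;

	for (n = 0; n < nr_pages && iov_iter_count(to); n++) {
		size_t want = min_t(size_t, PAGE_SIZE, iov_iter_count(to));
		size_t copied = copy_page_to_iter(pages[n], 0, want, to);

		done += copied;
		if (copied != want)	/* short copy: stop here */
			break;
	}
	return done;
}
#endif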
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
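/*
 * Illustrative sketch, not part of the original file: building an ITER_KVEC
 * iterator over kernel memory and draining it with copy_from_iter().  The
 * function name is hypothetical; the ITER_KVEC | WRITE direction follows
 * the convention that WRITE marks the iterator as a data source.
 */
#if 0	/* example only */
static size_t example_copy_from_kvec(void *dst, void *src, size_t len)
{
	struct kvec kv = { .iov_base = src, .iov_len = len };
	struct iov_iter iter;

	/* ITER_KVEC | WRITE: the iterator is a kernel-memory data source. */
	iov_iter_kvec(&iter, ITER_KVEC | WRITE, &kv, 1, len);
	return copy_from_iter(dst, len, &iter);
}
#endif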
void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
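/*
 * Illustrative sketch, not part of the original file: a direct-I/O style
 * caller pinning the pages behind the front of the iterator.  Note that
 * iov_iter_get_pages() does not advance the iterator; the caller does that
 * itself, and must put_page() each pinned page when the I/O completes.
 * The function name is hypothetical.
 */
#if 0	/* example only */
static ssize_t example_pin_user_pages(struct iov_iter *i,
				      struct page **pages, unsigned maxpages)
{
	size_t start;
	ssize_t got;

	/* Pin up to maxpages pages backing the current segment. */
	got = iov_iter_get_pages(i, pages, maxpages * PAGE_SIZE,
				 maxpages, &start);
	if (got <= 0)
		return got;

	/*
	 * 'got' bytes of data begin at offset 'start' in pages[0].  Submit
	 * the I/O against pages[], then consume what was mapped.
	 */
	iov_iter_advance(i, got);
	return got;
}
#endif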
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	char *from = addr;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;

	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
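/*
 * Illustrative sketch, not part of the original file: how a vectored-I/O
 * handler typically uses import_iovec().  The caller supplies a small stack
 * array; import_iovec() falls back to a heap copy for larger vectors and,
 * on success, leaves *iov as NULL (stack array used) or the heap pointer,
 * so an unconditional kfree() is safe.  example_dev/example_dev_send are
 * hypothetical.
 */
#if 0	/* example only */
static ssize_t example_vectored_write(struct example_dev *dev,
				      const struct iovec __user *uvec,
				      unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;

	ret = example_dev_send(dev, &iter);	/* hypothetical consumer */

	kfree(iov);	/* NULL when the stack array was used */
	return ret;
}
#endif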
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;

	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}