iov_iter.c

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        size_t left; \
        size_t wanted = n; \
        __p = i->iov; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } else { \
                left = 0; \
        } \
        while (unlikely(!left && n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->kvec; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                (void)(STEP); \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                (void)(STEP); \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted; \
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->bvec; \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
        if (likely(__v.bv_len)) { \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset + skip; \
                (void)(STEP); \
                skip += __v.bv_len; \
                n -= __v.bv_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.bv_len = min_t(size_t, n, __p->bv_len); \
                if (unlikely(!__v.bv_len)) \
                        continue; \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset; \
                (void)(STEP); \
                skip = __v.bv_len; \
                n -= __v.bv_len; \
        } \
        n = wanted; \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                struct kvec v; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
        } \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
                if (skip == bvec->bv_len) { \
                        bvec++; \
                        skip = 0; \
                } \
                i->nr_segs -= bvec - i->bvec; \
                i->bvec = bvec; \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                struct kvec v; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
                if (skip == kvec->iov_len) { \
                        kvec++; \
                        skip = 0; \
                } \
                i->nr_segs -= kvec - i->kvec; \
                i->kvec = kvec; \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
                if (skip == iov->iov_len) { \
                        iov++; \
                        skip = 0; \
                } \
                i->nr_segs -= iov - i->iov; \
                i->iov = iov; \
        } \
        i->count -= n; \
        i->iov_offset = skip; \
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);
        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;
                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;
                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);
        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;
                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;
                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
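
/*
 * Illustrative sketch only (not part of this file): the comment above
 * describes the usual calling pattern -- prefault the user memory, then do
 * the atomic copy and tolerate a short result, much like
 * generic_perform_write().  The helper name and its arguments below are
 * hypothetical.
 */
#if 0
static ssize_t example_fill_page(struct page *page, size_t offset,
                                 size_t bytes, struct iov_iter *i)
{
        size_t copied;

        /* Fault the pages in up front so the inatomic copy rarely falls short. */
        if (iov_iter_fault_in_readable(i, bytes))
                return -EFAULT;

        copied = copy_page_from_iter(page, offset, bytes, i);

        /* A short copy is not an error; the caller retries with what is left. */
        return copied;
}
#endif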

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;
        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_multipages_readable(v.iov_base,
                                        v.iov_len);
                        if (unlikely(err))
                                return err;
                        0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
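
/*
 * Illustrative sketch only (not part of this file): a write-style path builds
 * an iterator over the caller-supplied iovec and drains it with
 * copy_from_iter().  The names (example_write, kbuf) are hypothetical.
 */
#if 0
static ssize_t example_write(void *kbuf, const struct iovec *iov,
                             unsigned long nr_segs, size_t count)
{
        struct iov_iter iter;
        size_t copied;

        /* WRITE: data flows from the iovec-described buffers into kernel memory. */
        iov_iter_init(&iter, WRITE, iov, nr_segs, count);
        copied = copy_from_iter(kbuf, count, &iter);
        return copied;  /* may be short if a segment faults */
}
#endif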

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )
        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )
        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                   const struct kvec *kvec, unsigned long nr_segs,
                   size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                   const struct bio_vec *bvec, unsigned long nr_segs,
                   size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
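
/*
 * Illustrative sketch only (not part of this file): a bvec-backed iterator
 * describes kernel pages rather than user addresses, so the direction passed
 * in must carry the ITER_BVEC flag checked above.  The helper name
 * example_copy_to_pages is hypothetical; READ here denotes the destination
 * side of the transfer, i.e. the pages described by the bvec are being filled.
 */
#if 0
static size_t example_copy_to_pages(const void *kbuf, size_t len,
                                    struct bio_vec *bvec, unsigned long nr_segs)
{
        struct iov_iter iter;

        iov_iter_bvec(&iter, ITER_BVEC | READ, bvec, nr_segs, len);
        return copy_to_iter(kbuf, len, &iter);
}
#endif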

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        if (!size)
                return 0;
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        if (!size)
                return 0;
        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
        );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;
        if (!maxsize)
                return 0;
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;
                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
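
/*
 * Illustrative sketch only (not part of this file): a direct-I/O style caller
 * pins the pages behind the iterator, uses them, then drops the page
 * references and advances the iterator by the number of bytes actually
 * mapped.  The name example_pin_pages is hypothetical.
 */
#if 0
static ssize_t example_pin_pages(struct iov_iter *i, size_t maxsize)
{
        struct page *pages[16];
        size_t start;           /* offset into the first page */
        ssize_t bytes;
        int n, k;

        bytes = iov_iter_get_pages(i, pages, maxsize, ARRAY_SIZE(pages), &start);
        if (bytes <= 0)
                return bytes;

        /* ... issue I/O against pages[0..n-1], starting at 'start' ... */

        n = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
        for (k = 0; k < n; k++)
                put_page(pages[k]);
        iov_iter_advance(i, bytes);
        return bytes;
}
#endif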

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;
        if (maxsize > i->count)
                maxsize = i->count;
        if (!maxsize)
                return 0;
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;
        if (!size)
                return 0;
        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
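
/*
 * Illustrative sketch only (not part of this file): a readv/writev-style
 * caller passes a small on-stack array as the fast path; import_iovec()
 * either uses it (and returns NULL in *iov) or allocates a bigger array that
 * the caller must kfree().  The name example_readv is hypothetical.
 */
#if 0
static ssize_t example_readv(const struct iovec __user *uvec, unsigned nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
        if (ret < 0)
                return ret;

        /* on success ret == 0 and iter.count holds the total byte count */
        /* ... hand 'iter' to the actual read implementation ... */

        kfree(iov);             /* kfree(NULL) is a no-op for the fast path */
        return ret;
}
#endif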

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                         *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;
        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);