/* iov_iter.c */

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

#define iterate_all_kinds(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

#define iterate_and_advance(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}
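
/*
 * iterate_all_kinds() evaluates the expression I once per iovec segment
 * or B once per bio_vec segment, depending on the iterator type, with v
 * bound to the current segment; iterate_and_advance() does the same and
 * then moves the iterator past the bytes that were processed.  A typical
 * use is the one in iov_iter_alignment() below:
 *
 *	iterate_all_kinds(i, size, v,
 *		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
 *		res |= v.bv_offset | v.bv_len
 *	)
 */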

static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __clear_user(buf, copy);
	copy -= left;
	skip += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __clear_user(buf, copy);
		copy -= left;
		skip = copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & ITER_BVEC)) {
		char __user *buf = i->iov->iov_base + i->iov_offset;

		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= ITER_KVEC;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
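
/*
 * Rough sketch of a typical caller (identifiers below are illustrative,
 * not taken from this file): a read-style path handing a kernel buffer
 * back to a user-supplied iovec array would do something like
 *
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, uvec, nr_segs, total_len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 *
 * copied may be short if a user address faults; the iterator is advanced
 * by exactly that many bytes, so the caller can retry or give up.
 */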

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min(bytes, bvec->bv_len - skip);

	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
	to += copy;
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memzero_page(bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_to_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_from_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
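
/*
 * Note: copy_page_to_iter() is the helper that read-side code (for
 * instance the generic page cache read path) typically uses to hand the
 * contents of a page to the caller's buffers, and copy_page_from_iter()
 * is its write-side mirror.  Both return the number of bytes actually
 * copied, which may be less than 'bytes' if a user address faults
 * part-way through an iovec-backed iterator.
 */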

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_to_iter_bvec(addr, bytes, i);
	else
		return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_from_iter_bvec(addr, bytes, i);
	else
		return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC) {
		return zero_bvec(bytes, i);
	} else {
		return zero_iovec(bytes, i);
	}
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
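
/*
 * iov_iter_copy_from_user_atomic() runs under kmap_atomic() and so must
 * not sleep handling a user page fault.  Write paths in the style of
 * generic_perform_write() therefore pre-fault the user pages with
 * iov_iter_fault_in_readable() and advance the iterator only by what was
 * actually copied, roughly (sketch only; real callers add locking and
 * retry logic):
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		break;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	...
 *	iov_iter_advance(i, copied);
 */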

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
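
/*
 * iov_iter_alignment() ORs together every segment's start address (or
 * page offset) and length, so a caller that needs, say, block alignment
 * for direct I/O can test the result against its mask along the lines of
 *
 *	if (iov_iter_alignment(iter) & (blocksize - 1))
 *		... fall back to buffered I/O ...
 *
 * (blocksize is just an illustrative name for the caller's alignment
 * requirement.)
 */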

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
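
/*
 * Both branches above return straight out of the function from inside
 * the iteration, so a single call only maps pages from the first
 * non-empty segment.  The return value is the number of bytes those
 * pages cover, starting *start bytes into the first page; callers that
 * want more data advance the iterator and call again.
 */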

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);