iov_iter.c

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

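/*
 * Copy @bytes from the kernel buffer @from into the userspace segments
 * described by @i, advancing the iterator past whatever was copied.
 * Returns the number of bytes actually copied, which may be short if a
 * segment faults part-way through.
 */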
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

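/*
 * The mirror image of copy_to_iter_iovec(): copy @bytes from the userspace
 * segments of @i into the kernel buffer @to, advancing the iterator and
 * returning the number of bytes actually copied.
 */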
static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

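/*
 * Copy data from @page (starting at @offset) to the userspace segments of
 * @i.  The fast path pre-faults the first segment and copies under
 * kmap_atomic(); if that cannot complete, it falls back to a sleeping
 * kmap() and ordinary __copy_to_user().
 */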
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }

                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }

                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

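/*
 * The read-side counterpart of copy_page_to_iter_iovec(): copy data from
 * the userspace segments of @i into @page at @offset, trying the atomic
 * kmap path first and falling back to kmap() on a fault.
 */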
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }

                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }

                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

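/*
 * Clear @bytes of the userspace memory described by @i and advance the
 * iterator.  Returns the number of bytes actually zeroed.
 */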
static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __clear_user(buf, copy);
        copy -= left;
        skip += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __clear_user(buf, copy);
                copy -= left;
                skip = copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

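/*
 * Walk the iovec array and copy user data into @vaddr without allowing
 * page faults; stop at the first segment that faults and return the
 * number of bytes copied up to that point.
 */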
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
{
        size_t copied = 0, left = 0;

        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);

                base = 0;
                left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;

                if (unlikely(left))
                        break;
        }
        return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number of
 * bytes which were copied.
 */
static size_t copy_from_user_atomic_iovec(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;

                left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap_atomic(kaddr);
        return copied;
}

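/*
 * Advance an iovec-backed iterator by @bytes, walking (and skipping over
 * zero-length) segments as needed.
 */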
static void advance_iovec(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !iov->iov_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the iovec).
                 */
                while (bytes || unlikely(i->count && !iov->iov_len)) {
                        int copy;

                        copy = min(bytes, iov->iov_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
                                iov++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->iov = iov;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & ITER_BVEC)) {
                char __user *buf = i->iov->iov_base + i->iov_offset;

                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

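/*
 * OR together the addresses and lengths that make up the first i->count
 * bytes of the iterator; the result lets callers test the alignment of
 * the whole buffer with a single mask.
 */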
static unsigned long alignment_iovec(const struct iov_iter *i)
{
        const struct iovec *iov = i->iov;
        unsigned long res;
        size_t size = i->count;
        size_t n;

        if (!size)
                return 0;

        res = (unsigned long)iov->iov_base + i->iov_offset;
        n = iov->iov_len - i->iov_offset;
        if (n >= size)
                return res | size;
        size -= n;
        res |= n;
        while (size > (++iov)->iov_len) {
                res |= (unsigned long)iov->iov_base | iov->iov_len;
                size -= iov->iov_len;
        }
        res |= (unsigned long)iov->iov_base | size;
        return res;
}

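/*
 * Initialize an iovec-backed iterator.  If the caller is running under
 * set_fs(KERNEL_DS), the ITER_KVEC flag is folded into the direction,
 * marking the segments as kernel rather than userspace pointers.
 */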
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS))
                direction |= ITER_KVEC;
        i->type = direction;
        i->iov = iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

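/*
 * Typical use (an illustrative sketch, not code from this file): a caller
 * that wants to copy a kernel buffer out to a user-supplied iovec array
 * wraps the array in an iov_iter and lets copy_to_iter() do the walking:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, uvec, nr_segs, total_len);
 *	copied = copy_to_iter(kbuf, kbuf_len, &iter);
 *
 * The identifiers uvec/kbuf/total_len above are placeholders; READ means
 * the data flows towards the user buffers, as in the read(2) path.
 */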
static ssize_t get_pages_iovec(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        if (len > maxpages * PAGE_SIZE)
                len = maxpages * PAGE_SIZE;
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
        if (unlikely(res < 0))
                return res;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        void *p;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        if (!p)
                return -ENOMEM;

        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
        if (unlikely(res < 0)) {
                kvfree(p);
                return res;
        }
        *pages = p;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct iovec *iov = i->iov;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, iov++) {
                unsigned long addr = (unsigned long)iov->iov_base + offset;
                size_t len = iov->iov_len - offset;

                offset = 0;
                if (unlikely(!len))     /* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
                          - addr / PAGE_SIZE;
                if (npages >= maxpages) /* don't bother going further */
                        return maxpages;
                size -= len;
        }
        return min(npages, maxpages);
}

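/*
 * Small kmap_atomic() helpers used by the ITER_BVEC paths below: copy to,
 * copy from, or zero a range of a page.
 */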
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

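/*
 * The ITER_BVEC variants mirror the iovec ones above, but the segments are
 * bio_vecs pointing at kernel pages, so plain memcpy() through kmap_atomic()
 * is used and the copies cannot fault.
 */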
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
        skip += copy;
        from += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min(bytes, bvec->bv_len - skip);

        memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
        to += copy;
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memzero_page(bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

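/*
 * Fill @page at @offset from the bvec segments of @i.  Despite the name
 * there is no userspace access here; this is the ITER_BVEC backend for
 * iov_iter_copy_from_user_atomic().
 */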
static size_t copy_from_user_bvec(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t left;
        const struct bio_vec *bvec;
        size_t base = i->iov_offset;

        kaddr = kmap_atomic(page);
        for (left = bytes, bvec = i->bvec; left; bvec++, base = 0) {
                size_t copy = min(left, bvec->bv_len - base);

                if (!bvec->bv_len)
                        continue;
                memcpy_from_page(kaddr + offset, bvec->bv_page,
                                 bvec->bv_offset + base, copy);
                offset += copy;
                left -= copy;
        }
        kunmap_atomic(kaddr);
        return bytes;
}

static void advance_bvec(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct bio_vec *bvec = i->bvec;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !bvec->bv_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the bvec array).
                 */
                while (bytes || unlikely(i->count && !bvec->bv_len)) {
                        int copy;

                        copy = min(bytes, bvec->bv_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (bvec->bv_len == base) {
                                bvec++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->bvec = bvec;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}

static unsigned long alignment_bvec(const struct iov_iter *i)
{
        const struct bio_vec *bvec = i->bvec;
        unsigned long res;
        size_t size = i->count;
        size_t n;

        if (!size)
                return 0;

        res = bvec->bv_offset + i->iov_offset;
        n = bvec->bv_len - i->iov_offset;
        if (n >= size)
                return res | size;
        size -= n;
        res |= n;
        while (size > (++bvec)->bv_len) {
                res |= bvec->bv_offset | bvec->bv_len;
                size -= bvec->bv_len;
        }
        res |= bvec->bv_offset | size;
        return res;
}

static ssize_t get_pages_bvec(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;

        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        /* can't be more than PAGE_SIZE */
        *start = bvec->bv_offset + i->iov_offset;

        get_page(*pages = bvec->bv_page);

        return len;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;

        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!*pages)
                return -ENOMEM;

        get_page(**pages = bvec->bv_page);

        return len;
}

static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct bio_vec *bvec = i->bvec;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, bvec++) {
                size_t len = bvec->bv_len - offset;

                offset = 0;
                if (unlikely(!len))     /* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages++;
                if (npages >= maxpages) /* don't bother going further */
                        return maxpages;
                size -= len;
        }
        return min(npages, maxpages);
}

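/*
 * Exported entry points: each one simply dispatches to the iovec or bvec
 * implementation above based on the ITER_BVEC bit in i->type.
 */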
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_to_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_from_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_to_iter_bvec(addr, bytes, i);
        else
                return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_from_iter_bvec(addr, bytes, i);
        else
                return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return zero_bvec(bytes, i);
        else
                return zero_iovec(bytes, i);
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        if (i->type & ITER_BVEC)
                return copy_from_user_bvec(page, i, offset, bytes);
        else
                return copy_from_user_atomic_iovec(page, i, offset, bytes);
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (i->type & ITER_BVEC)
                advance_bvec(i, size);
        else
                advance_iovec(i, size);
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return alignment_bvec(i);
        else
                return alignment_iovec(i);
}
EXPORT_SYMBOL(iov_iter_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_bvec(i, pages, maxsize, maxpages, start);
        else
                return get_pages_iovec(i, pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_alloc_bvec(i, pages, maxsize, start);
        else
                return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        if (i->type & ITER_BVEC)
                return iov_iter_npages_bvec(i, maxpages);
        else
                return iov_iter_npages_iovec(i, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);