iov_iter.c

#include &lt;linux/export.h&gt;
#include &lt;linux/bvec.h&gt;
#include &lt;linux/uio.h&gt;
#include &lt;linux/pagemap.h&gt;
#include &lt;linux/slab.h&gt;
#include &lt;linux/vmalloc.h&gt;
#include &lt;linux/splice.h&gt;
#include &lt;net/checksum.h&gt;

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
	struct bvec_iter __start; \
	__start.bi_size = n; \
	__start.bi_bvec_done = skip; \
	__start.bi_idx = 0; \
	for_each_bvec(__v, i->bvec, __bi, __start) { \
		if (!__v.bv_len) \
			continue; \
		(void)(STEP); \
	} \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
	if (likely(n)) { \
		size_t skip = i->iov_offset; \
		if (unlikely(i->type & ITER_BVEC)) { \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
		} else if (unlikely(i->type & ITER_KVEC)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
		} else { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
		} \
	} \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
	if (unlikely(i->count < n)) \
		n = i->count; \
	if (i->count) { \
		size_t skip = i->iov_offset; \
		if (unlikely(i->type & ITER_BVEC)) { \
			const struct bio_vec *bvec = i->bvec; \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec; \
			skip = __bi.bi_bvec_done; \
		} else if (unlikely(i->type & ITER_KVEC)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
			if (skip == kvec->iov_len) { \
				kvec++; \
				skip = 0; \
			} \
			i->nr_segs -= kvec - i->kvec; \
			i->kvec = kvec; \
		} else { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
			if (skip == iov->iov_len) { \
				iov++; \
				skip = 0; \
			} \
			i->nr_segs -= iov - i->iov; \
			i->iov = iov; \
		} \
		i->count -= n; \
		i->iov_offset = skip; \
	} \
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...
		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
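/*
 * Illustrative sketch (not part of this file): a typical caller, such as a
 * generic buffered-write path, faults the user pages in *before* taking a
 * page lock and then copies with the atomic helper, e.g.:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *
 * The surrounding names and control flow here are assumptions for
 * illustration only.
 */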
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
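/*
 * Illustrative sketch (assumption, not from this file): wrapping a single
 * user buffer in an iov_iter, as a synchronous read/write helper might do:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	// iter now describes one user segment of 'len' bytes
 */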
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
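/*
 * Illustrative sketch (assumption): a ->read_iter()-style consumer copying a
 * kernel buffer out to whatever the iterator describes (iovec, kvec or bvec):
 *
 *	size_t copied = copy_to_iter(kbuf, kbuf_len, to);
 *	if (copied < kbuf_len)
 *		return -EFAULT;	// short copy: destination memory faulted
 *	return copied;
 */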
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= (n - unroll);
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
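/*
 * Illustrative sketch (assumption): undoing the part of an advance that a
 * lower layer did not actually consume, e.g. after a short write:
 *
 *	iov_iter_advance(iter, attempted);
 *	if (written < attempted)
 *		iov_iter_revert(iter, attempted - written);
 */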
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
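/*
 * Illustrative sketch (assumption): iterating over pages already in the
 * kernel, e.g. handing one page to a file's ->read_iter(). Note that the
 * direction passed in must carry ITER_BVEC, as the BUG_ON() above enforces:
 *
 *	struct bio_vec bvec = {
 *		.bv_page	= page,
 *		.bv_len		= PAGE_SIZE,
 *		.bv_offset	= 0,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
 */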
void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
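/*
 * Illustrative sketch (assumption): the usual calling pattern with a small
 * on-stack array, mirroring the kerneldoc above; kfree() is always safe
 * because *iov is set to NULL when the stack array was used:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvector, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use iter ...
 *	kfree(iov);
 */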
#ifdef CONFIG_COMPAT
#include &lt;linux/compat.h&gt;

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);