iov_iter.c

  1. #include <linux/export.h>
  2. #include <linux/uio.h>
  3. #include <linux/pagemap.h>
  4. #include <linux/slab.h>
  5. #include <linux/vmalloc.h>
  6. #include <linux/splice.h>
  7. #include <net/checksum.h>
  8. #define PIPE_PARANOIA /* for now */
  9. #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
  10. size_t left; \
  11. size_t wanted = n; \
  12. __p = i->iov; \
  13. __v.iov_len = min(n, __p->iov_len - skip); \
  14. if (likely(__v.iov_len)) { \
  15. __v.iov_base = __p->iov_base + skip; \
  16. left = (STEP); \
  17. __v.iov_len -= left; \
  18. skip += __v.iov_len; \
  19. n -= __v.iov_len; \
  20. } else { \
  21. left = 0; \
  22. } \
  23. while (unlikely(!left && n)) { \
  24. __p++; \
  25. __v.iov_len = min(n, __p->iov_len); \
  26. if (unlikely(!__v.iov_len)) \
  27. continue; \
  28. __v.iov_base = __p->iov_base; \
  29. left = (STEP); \
  30. __v.iov_len -= left; \
  31. skip = __v.iov_len; \
  32. n -= __v.iov_len; \
  33. } \
  34. n = wanted - n; \
  35. }
  36. #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
  37. size_t wanted = n; \
  38. __p = i->kvec; \
  39. __v.iov_len = min(n, __p->iov_len - skip); \
  40. if (likely(__v.iov_len)) { \
  41. __v.iov_base = __p->iov_base + skip; \
  42. (void)(STEP); \
  43. skip += __v.iov_len; \
  44. n -= __v.iov_len; \
  45. } \
  46. while (unlikely(n)) { \
  47. __p++; \
  48. __v.iov_len = min(n, __p->iov_len); \
  49. if (unlikely(!__v.iov_len)) \
  50. continue; \
  51. __v.iov_base = __p->iov_base; \
  52. (void)(STEP); \
  53. skip = __v.iov_len; \
  54. n -= __v.iov_len; \
  55. } \
  56. n = wanted; \
  57. }
  58. #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
  59. struct bvec_iter __start; \
  60. __start.bi_size = n; \
  61. __start.bi_bvec_done = skip; \
  62. __start.bi_idx = 0; \
  63. for_each_bvec(__v, i->bvec, __bi, __start) { \
  64. if (!__v.bv_len) \
  65. continue; \
  66. (void)(STEP); \
  67. } \
  68. }
  69. #define iterate_all_kinds(i, n, v, I, B, K) { \
  70. size_t skip = i->iov_offset; \
  71. if (unlikely(i->type & ITER_BVEC)) { \
  72. struct bio_vec v; \
  73. struct bvec_iter __bi; \
  74. iterate_bvec(i, n, v, __bi, skip, (B)) \
  75. } else if (unlikely(i->type & ITER_KVEC)) { \
  76. const struct kvec *kvec; \
  77. struct kvec v; \
  78. iterate_kvec(i, n, v, kvec, skip, (K)) \
  79. } else { \
  80. const struct iovec *iov; \
  81. struct iovec v; \
  82. iterate_iovec(i, n, v, iov, skip, (I)) \
  83. } \
  84. }
  85. #define iterate_and_advance(i, n, v, I, B, K) { \
  86. if (unlikely(i->count < n)) \
  87. n = i->count; \
  88. if (i->count) { \
  89. size_t skip = i->iov_offset; \
  90. if (unlikely(i->type & ITER_BVEC)) { \
  91. const struct bio_vec *bvec = i->bvec; \
  92. struct bio_vec v; \
  93. struct bvec_iter __bi; \
  94. iterate_bvec(i, n, v, __bi, skip, (B)) \
  95. i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
  96. i->nr_segs -= i->bvec - bvec; \
  97. skip = __bi.bi_bvec_done; \
  98. } else if (unlikely(i->type & ITER_KVEC)) { \
  99. const struct kvec *kvec; \
  100. struct kvec v; \
  101. iterate_kvec(i, n, v, kvec, skip, (K)) \
  102. if (skip == kvec->iov_len) { \
  103. kvec++; \
  104. skip = 0; \
  105. } \
  106. i->nr_segs -= kvec - i->kvec; \
  107. i->kvec = kvec; \
  108. } else { \
  109. const struct iovec *iov; \
  110. struct iovec v; \
  111. iterate_iovec(i, n, v, iov, skip, (I)) \
  112. if (skip == iov->iov_len) { \
  113. iov++; \
  114. skip = 0; \
  115. } \
  116. i->nr_segs -= iov - i->iov; \
  117. i->iov = iov; \
  118. } \
  119. i->count -= n; \
  120. i->iov_offset = skip; \
  121. } \
  122. }
  123. static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
  124. struct iov_iter *i)
  125. {
  126. size_t skip, copy, left, wanted;
  127. const struct iovec *iov;
  128. char __user *buf;
  129. void *kaddr, *from;
  130. if (unlikely(bytes > i->count))
  131. bytes = i->count;
  132. if (unlikely(!bytes))
  133. return 0;
  134. wanted = bytes;
  135. iov = i->iov;
  136. skip = i->iov_offset;
  137. buf = iov->iov_base + skip;
  138. copy = min(bytes, iov->iov_len - skip);
  139. if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
  140. kaddr = kmap_atomic(page);
  141. from = kaddr + offset;
  142. /* first chunk, usually the only one */
  143. left = __copy_to_user_inatomic(buf, from, copy);
  144. copy -= left;
  145. skip += copy;
  146. from += copy;
  147. bytes -= copy;
  148. while (unlikely(!left && bytes)) {
  149. iov++;
  150. buf = iov->iov_base;
  151. copy = min(bytes, iov->iov_len);
  152. left = __copy_to_user_inatomic(buf, from, copy);
  153. copy -= left;
  154. skip = copy;
  155. from += copy;
  156. bytes -= copy;
  157. }
  158. if (likely(!bytes)) {
  159. kunmap_atomic(kaddr);
  160. goto done;
  161. }
  162. offset = from - kaddr;
  163. buf += copy;
  164. kunmap_atomic(kaddr);
  165. copy = min(bytes, iov->iov_len - skip);
  166. }
  167. /* Too bad - revert to non-atomic kmap */
  168. kaddr = kmap(page);
  169. from = kaddr + offset;
  170. left = __copy_to_user(buf, from, copy);
  171. copy -= left;
  172. skip += copy;
  173. from += copy;
  174. bytes -= copy;
  175. while (unlikely(!left && bytes)) {
  176. iov++;
  177. buf = iov->iov_base;
  178. copy = min(bytes, iov->iov_len);
  179. left = __copy_to_user(buf, from, copy);
  180. copy -= left;
  181. skip = copy;
  182. from += copy;
  183. bytes -= copy;
  184. }
  185. kunmap(page);
  186. done:
  187. if (skip == iov->iov_len) {
  188. iov++;
  189. skip = 0;
  190. }
  191. i->count -= wanted - bytes;
  192. i->nr_segs -= iov - i->iov;
  193. i->iov = iov;
  194. i->iov_offset = skip;
  195. return wanted - bytes;
  196. }
  197. static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
  198. struct iov_iter *i)
  199. {
  200. size_t skip, copy, left, wanted;
  201. const struct iovec *iov;
  202. char __user *buf;
  203. void *kaddr, *to;
  204. if (unlikely(bytes > i->count))
  205. bytes = i->count;
  206. if (unlikely(!bytes))
  207. return 0;
  208. wanted = bytes;
  209. iov = i->iov;
  210. skip = i->iov_offset;
  211. buf = iov->iov_base + skip;
  212. copy = min(bytes, iov->iov_len - skip);
  213. if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
  214. kaddr = kmap_atomic(page);
  215. to = kaddr + offset;
  216. /* first chunk, usually the only one */
  217. left = __copy_from_user_inatomic(to, buf, copy);
  218. copy -= left;
  219. skip += copy;
  220. to += copy;
  221. bytes -= copy;
  222. while (unlikely(!left && bytes)) {
  223. iov++;
  224. buf = iov->iov_base;
  225. copy = min(bytes, iov->iov_len);
  226. left = __copy_from_user_inatomic(to, buf, copy);
  227. copy -= left;
  228. skip = copy;
  229. to += copy;
  230. bytes -= copy;
  231. }
  232. if (likely(!bytes)) {
  233. kunmap_atomic(kaddr);
  234. goto done;
  235. }
  236. offset = to - kaddr;
  237. buf += copy;
  238. kunmap_atomic(kaddr);
  239. copy = min(bytes, iov->iov_len - skip);
  240. }
  241. /* Too bad - revert to non-atomic kmap */
  242. kaddr = kmap(page);
  243. to = kaddr + offset;
  244. left = __copy_from_user(to, buf, copy);
  245. copy -= left;
  246. skip += copy;
  247. to += copy;
  248. bytes -= copy;
  249. while (unlikely(!left && bytes)) {
  250. iov++;
  251. buf = iov->iov_base;
  252. copy = min(bytes, iov->iov_len);
  253. left = __copy_from_user(to, buf, copy);
  254. copy -= left;
  255. skip = copy;
  256. to += copy;
  257. bytes -= copy;
  258. }
  259. kunmap(page);
  260. done:
  261. if (skip == iov->iov_len) {
  262. iov++;
  263. skip = 0;
  264. }
  265. i->count -= wanted - bytes;
  266. i->nr_segs -= iov - i->iov;
  267. i->iov = iov;
  268. i->iov_offset = skip;
  269. return wanted - bytes;
  270. }
  271. #ifdef PIPE_PARANOIA
  272. static bool sanity(const struct iov_iter *i)
  273. {
  274. struct pipe_inode_info *pipe = i->pipe;
  275. int idx = i->idx;
  276. int next = pipe->curbuf + pipe->nrbufs;
  277. if (i->iov_offset) {
  278. struct pipe_buffer *p;
  279. if (unlikely(!pipe->nrbufs))
  280. goto Bad; // pipe must be non-empty
  281. if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
  282. goto Bad; // must be at the last buffer...
  283. p = &pipe->bufs[idx];
  284. if (unlikely(p->offset + p->len != i->iov_offset))
  285. goto Bad; // ... at the end of segment
  286. } else {
  287. if (idx != (next & (pipe->buffers - 1)))
  288. goto Bad; // must be right after the last buffer
  289. }
  290. return true;
  291. Bad:
  292. printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
  293. printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
  294. pipe->curbuf, pipe->nrbufs, pipe->buffers);
  295. for (idx = 0; idx < pipe->buffers; idx++)
  296. printk(KERN_ERR "[%p %p %d %d]\n",
  297. pipe->bufs[idx].ops,
  298. pipe->bufs[idx].page,
  299. pipe->bufs[idx].offset,
  300. pipe->bufs[idx].len);
  301. WARN_ON(1);
  302. return false;
  303. }
  304. #else
  305. #define sanity(i) true
  306. #endif
  307. static inline int next_idx(int idx, struct pipe_inode_info *pipe)
  308. {
  309. return (idx + 1) & (pipe->buffers - 1);
  310. }
  311. static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
  312. struct iov_iter *i)
  313. {
  314. struct pipe_inode_info *pipe = i->pipe;
  315. struct pipe_buffer *buf;
  316. size_t off;
  317. int idx;
  318. if (unlikely(bytes > i->count))
  319. bytes = i->count;
  320. if (unlikely(!bytes))
  321. return 0;
  322. if (!sanity(i))
  323. return 0;
  324. off = i->iov_offset;
  325. idx = i->idx;
  326. buf = &pipe->bufs[idx];
  327. if (off) {
  328. if (offset == off && buf->page == page) {
  329. /* merge with the last one */
  330. buf->len += bytes;
  331. i->iov_offset += bytes;
  332. goto out;
  333. }
  334. idx = next_idx(idx, pipe);
  335. buf = &pipe->bufs[idx];
  336. }
  337. if (idx == pipe->curbuf && pipe->nrbufs)
  338. return 0;
  339. pipe->nrbufs++;
  340. buf->ops = &page_cache_pipe_buf_ops;
  341. get_page(buf->page = page);
  342. buf->offset = offset;
  343. buf->len = bytes;
  344. i->iov_offset = offset + bytes;
  345. i->idx = idx;
  346. out:
  347. i->count -= bytes;
  348. return bytes;
  349. }
  350. /*
  351. * Fault in one or more iovecs of the given iov_iter, to a maximum length of
  352. * bytes. For each iovec, fault in each page that constitutes the iovec.
  353. *
  354. * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
  355. * because it is an invalid address).
  356. */
  357. int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
  358. {
  359. size_t skip = i->iov_offset;
  360. const struct iovec *iov;
  361. int err;
  362. struct iovec v;
  363. if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
  364. iterate_iovec(i, bytes, v, iov, skip, ({
  365. err = fault_in_pages_readable(v.iov_base, v.iov_len);
  366. if (unlikely(err))
  367. return err;
  368. 0;}))
  369. }
  370. return 0;
  371. }
  372. EXPORT_SYMBOL(iov_iter_fault_in_readable);
  373. void iov_iter_init(struct iov_iter *i, int direction,
  374. const struct iovec *iov, unsigned long nr_segs,
  375. size_t count)
  376. {
  377. /* It will get better. Eventually... */
  378. if (segment_eq(get_fs(), KERNEL_DS)) {
  379. direction |= ITER_KVEC;
  380. i->type = direction;
  381. i->kvec = (struct kvec *)iov;
  382. } else {
  383. i->type = direction;
  384. i->iov = iov;
  385. }
  386. i->nr_segs = nr_segs;
  387. i->iov_offset = 0;
  388. i->count = count;
  389. }
  390. EXPORT_SYMBOL(iov_iter_init);
  391. static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
  392. {
  393. char *from = kmap_atomic(page);
  394. memcpy(to, from + offset, len);
  395. kunmap_atomic(from);
  396. }
  397. static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
  398. {
  399. char *to = kmap_atomic(page);
  400. memcpy(to + offset, from, len);
  401. kunmap_atomic(to);
  402. }
  403. static void memzero_page(struct page *page, size_t offset, size_t len)
  404. {
  405. char *addr = kmap_atomic(page);
  406. memset(addr + offset, 0, len);
  407. kunmap_atomic(addr);
  408. }
  409. static inline bool allocated(struct pipe_buffer *buf)
  410. {
  411. return buf->ops == &default_pipe_buf_ops;
  412. }
  413. static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
  414. {
  415. size_t off = i->iov_offset;
  416. int idx = i->idx;
  417. if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
  418. idx = next_idx(idx, i->pipe);
  419. off = 0;
  420. }
  421. *idxp = idx;
  422. *offp = off;
  423. }
  424. static size_t push_pipe(struct iov_iter *i, size_t size,
  425. int *idxp, size_t *offp)
  426. {
  427. struct pipe_inode_info *pipe = i->pipe;
  428. size_t off;
  429. int idx;
  430. ssize_t left;
  431. if (unlikely(size > i->count))
  432. size = i->count;
  433. if (unlikely(!size))
  434. return 0;
  435. left = size;
  436. data_start(i, &idx, &off);
  437. *idxp = idx;
  438. *offp = off;
  439. if (off) {
  440. left -= PAGE_SIZE - off;
  441. if (left <= 0) {
  442. pipe->bufs[idx].len += size;
  443. return size;
  444. }
  445. pipe->bufs[idx].len = PAGE_SIZE;
  446. idx = next_idx(idx, pipe);
  447. }
  448. while (idx != pipe->curbuf || !pipe->nrbufs) {
  449. struct page *page = alloc_page(GFP_USER);
  450. if (!page)
  451. break;
  452. pipe->nrbufs++;
  453. pipe->bufs[idx].ops = &default_pipe_buf_ops;
  454. pipe->bufs[idx].page = page;
  455. pipe->bufs[idx].offset = 0;
  456. if (left <= PAGE_SIZE) {
  457. pipe->bufs[idx].len = left;
  458. return size;
  459. }
  460. pipe->bufs[idx].len = PAGE_SIZE;
  461. left -= PAGE_SIZE;
  462. idx = next_idx(idx, pipe);
  463. }
  464. return size - left;
  465. }
  466. static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
  467. struct iov_iter *i)
  468. {
  469. struct pipe_inode_info *pipe = i->pipe;
  470. size_t n, off;
  471. int idx;
  472. if (!sanity(i))
  473. return 0;
  474. bytes = n = push_pipe(i, bytes, &idx, &off);
  475. if (unlikely(!n))
  476. return 0;
  477. for ( ; n; idx = next_idx(idx, pipe), off = 0) {
  478. size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
  479. memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
  480. i->idx = idx;
  481. i->iov_offset = off + chunk;
  482. n -= chunk;
  483. addr += chunk;
  484. }
  485. i->count -= bytes;
  486. return bytes;
  487. }
  488. size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  489. {
  490. const char *from = addr;
  491. if (unlikely(i->type & ITER_PIPE))
  492. return copy_pipe_to_iter(addr, bytes, i);
  493. iterate_and_advance(i, bytes, v,
  494. __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
  495. v.iov_len),
  496. memcpy_to_page(v.bv_page, v.bv_offset,
  497. (from += v.bv_len) - v.bv_len, v.bv_len),
  498. memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
  499. )
  500. return bytes;
  501. }
  502. EXPORT_SYMBOL(copy_to_iter);
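/*
 * Illustrative sketch (added to this listing, not part of the kernel
 * source): building an iterator over a single user iovec and filling it
 * from a kernel buffer with copy_to_iter(), roughly what a simple
 * ->read() path does.  "ubuf", "kbuf" and "len" are placeholders.
 */
static size_t example_copy_to_user_buf(char __user *ubuf, const void *kbuf,
				       size_t len)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter to;

	/* READ: data flows from the kernel towards user space. */
	iov_iter_init(&to, READ, &iov, 1, len);
	return copy_to_iter(kbuf, len, &to);	/* bytes actually copied */
}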
  503. size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  504. {
  505. char *to = addr;
  506. if (unlikely(i->type & ITER_PIPE)) {
  507. WARN_ON(1);
  508. return 0;
  509. }
  510. iterate_and_advance(i, bytes, v,
  511. __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
  512. v.iov_len),
  513. memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  514. v.bv_offset, v.bv_len),
  515. memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  516. )
  517. return bytes;
  518. }
  519. EXPORT_SYMBOL(copy_from_iter);
  520. bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
  521. {
  522. char *to = addr;
  523. if (unlikely(i->type & ITER_PIPE)) {
  524. WARN_ON(1);
  525. return false;
  526. }
  527. if (unlikely(i->count < bytes)) \
  528. return false;
  529. iterate_all_kinds(i, bytes, v, ({
  530. if (__copy_from_user((to += v.iov_len) - v.iov_len,
  531. v.iov_base, v.iov_len))
  532. return false;
  533. 0;}),
  534. memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  535. v.bv_offset, v.bv_len),
  536. memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  537. )
  538. iov_iter_advance(i, bytes);
  539. return true;
  540. }
  541. EXPORT_SYMBOL(copy_from_iter_full);
  542. size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  543. {
  544. char *to = addr;
  545. if (unlikely(i->type & ITER_PIPE)) {
  546. WARN_ON(1);
  547. return 0;
  548. }
  549. iterate_and_advance(i, bytes, v,
  550. __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
  551. v.iov_base, v.iov_len),
  552. memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  553. v.bv_offset, v.bv_len),
  554. memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  555. )
  556. return bytes;
  557. }
  558. EXPORT_SYMBOL(copy_from_iter_nocache);
  559. bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
  560. {
  561. char *to = addr;
  562. if (unlikely(i->type & ITER_PIPE)) {
  563. WARN_ON(1);
  564. return false;
  565. }
  566. if (unlikely(i->count < bytes)) \
  567. return false;
  568. iterate_all_kinds(i, bytes, v, ({
  569. if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
  570. v.iov_base, v.iov_len))
  571. return false;
  572. 0;}),
  573. memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  574. v.bv_offset, v.bv_len),
  575. memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  576. )
  577. iov_iter_advance(i, bytes);
  578. return true;
  579. }
  580. EXPORT_SYMBOL(copy_from_iter_full_nocache);
  581. size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
  582. struct iov_iter *i)
  583. {
  584. if (i->type & (ITER_BVEC|ITER_KVEC)) {
  585. void *kaddr = kmap_atomic(page);
  586. size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
  587. kunmap_atomic(kaddr);
  588. return wanted;
  589. } else if (likely(!(i->type & ITER_PIPE)))
  590. return copy_page_to_iter_iovec(page, offset, bytes, i);
  591. else
  592. return copy_page_to_iter_pipe(page, offset, bytes, i);
  593. }
  594. EXPORT_SYMBOL(copy_page_to_iter);
  595. size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
  596. struct iov_iter *i)
  597. {
  598. if (unlikely(i->type & ITER_PIPE)) {
  599. WARN_ON(1);
  600. return 0;
  601. }
  602. if (i->type & (ITER_BVEC|ITER_KVEC)) {
  603. void *kaddr = kmap_atomic(page);
  604. size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
  605. kunmap_atomic(kaddr);
  606. return wanted;
  607. } else
  608. return copy_page_from_iter_iovec(page, offset, bytes, i);
  609. }
  610. EXPORT_SYMBOL(copy_page_from_iter);
  611. static size_t pipe_zero(size_t bytes, struct iov_iter *i)
  612. {
  613. struct pipe_inode_info *pipe = i->pipe;
  614. size_t n, off;
  615. int idx;
  616. if (!sanity(i))
  617. return 0;
  618. bytes = n = push_pipe(i, bytes, &idx, &off);
  619. if (unlikely(!n))
  620. return 0;
  621. for ( ; n; idx = next_idx(idx, pipe), off = 0) {
  622. size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
  623. memzero_page(pipe->bufs[idx].page, off, chunk);
  624. i->idx = idx;
  625. i->iov_offset = off + chunk;
  626. n -= chunk;
  627. }
  628. i->count -= bytes;
  629. return bytes;
  630. }
  631. size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
  632. {
  633. if (unlikely(i->type & ITER_PIPE))
  634. return pipe_zero(bytes, i);
  635. iterate_and_advance(i, bytes, v,
  636. __clear_user(v.iov_base, v.iov_len),
  637. memzero_page(v.bv_page, v.bv_offset, v.bv_len),
  638. memset(v.iov_base, 0, v.iov_len)
  639. )
  640. return bytes;
  641. }
  642. EXPORT_SYMBOL(iov_iter_zero);
  643. size_t iov_iter_copy_from_user_atomic(struct page *page,
  644. struct iov_iter *i, unsigned long offset, size_t bytes)
  645. {
  646. char *kaddr = kmap_atomic(page), *p = kaddr + offset;
  647. if (unlikely(i->type & ITER_PIPE)) {
  648. kunmap_atomic(kaddr);
  649. WARN_ON(1);
  650. return 0;
  651. }
  652. iterate_all_kinds(i, bytes, v,
  653. __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
  654. v.iov_base, v.iov_len),
  655. memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
  656. v.bv_offset, v.bv_len),
  657. memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  658. )
  659. kunmap_atomic(kaddr);
  660. return bytes;
  661. }
  662. EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
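/*
 * Illustrative sketch (added to this listing, not part of the kernel
 * source): the usual pairing of iov_iter_fault_in_readable() above with
 * the atomic copy helper, loosely modelled on the buffered-write path in
 * mm/filemap.c.  The function name is hypothetical.
 */
static size_t example_copy_into_page(struct page *page, unsigned long offset,
				     size_t bytes, struct iov_iter *i)
{
	size_t copied;

	/* Fault the user pages in while sleeping is still allowed... */
	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return 0;

	/* ...so that the no-sleep atomic copy normally succeeds. */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	iov_iter_advance(i, copied);

	/* A short copy means the caller should retry with fewer bytes. */
	return copied;
}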
  663. static void pipe_advance(struct iov_iter *i, size_t size)
  664. {
  665. struct pipe_inode_info *pipe = i->pipe;
  666. struct pipe_buffer *buf;
  667. int idx = i->idx;
  668. size_t off = i->iov_offset, orig_sz;
  669. if (unlikely(i->count < size))
  670. size = i->count;
  671. orig_sz = size;
  672. if (size) {
  673. if (off) /* make it relative to the beginning of buffer */
  674. size += off - pipe->bufs[idx].offset;
  675. while (1) {
  676. buf = &pipe->bufs[idx];
  677. if (size <= buf->len)
  678. break;
  679. size -= buf->len;
  680. idx = next_idx(idx, pipe);
  681. }
  682. buf->len = size;
  683. i->idx = idx;
  684. off = i->iov_offset = buf->offset + size;
  685. }
  686. if (off)
  687. idx = next_idx(idx, pipe);
  688. if (pipe->nrbufs) {
  689. int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
  690. /* [curbuf,unused) is in use. Free [idx,unused) */
  691. while (idx != unused) {
  692. pipe_buf_release(pipe, &pipe->bufs[idx]);
  693. idx = next_idx(idx, pipe);
  694. pipe->nrbufs--;
  695. }
  696. }
  697. i->count -= orig_sz;
  698. }
  699. void iov_iter_advance(struct iov_iter *i, size_t size)
  700. {
  701. if (unlikely(i->type & ITER_PIPE)) {
  702. pipe_advance(i, size);
  703. return;
  704. }
  705. iterate_and_advance(i, size, v, 0, 0, 0)
  706. }
  707. EXPORT_SYMBOL(iov_iter_advance);
  708. /*
  709. * Return the count of just the current iov_iter segment.
  710. */
  711. size_t iov_iter_single_seg_count(const struct iov_iter *i)
  712. {
  713. if (unlikely(i->type & ITER_PIPE))
  714. return i->count; // it is a silly place, anyway
  715. if (i->nr_segs == 1)
  716. return i->count;
  717. else if (i->type & ITER_BVEC)
  718. return min(i->count, i->bvec->bv_len - i->iov_offset);
  719. else
  720. return min(i->count, i->iov->iov_len - i->iov_offset);
  721. }
  722. EXPORT_SYMBOL(iov_iter_single_seg_count);
  723. void iov_iter_kvec(struct iov_iter *i, int direction,
  724. const struct kvec *kvec, unsigned long nr_segs,
  725. size_t count)
  726. {
  727. BUG_ON(!(direction & ITER_KVEC));
  728. i->type = direction;
  729. i->kvec = kvec;
  730. i->nr_segs = nr_segs;
  731. i->iov_offset = 0;
  732. i->count = count;
  733. }
  734. EXPORT_SYMBOL(iov_iter_kvec);
  735. void iov_iter_bvec(struct iov_iter *i, int direction,
  736. const struct bio_vec *bvec, unsigned long nr_segs,
  737. size_t count)
  738. {
  739. BUG_ON(!(direction & ITER_BVEC));
  740. i->type = direction;
  741. i->bvec = bvec;
  742. i->nr_segs = nr_segs;
  743. i->iov_offset = 0;
  744. i->count = count;
  745. }
  746. EXPORT_SYMBOL(iov_iter_bvec);
  747. void iov_iter_pipe(struct iov_iter *i, int direction,
  748. struct pipe_inode_info *pipe,
  749. size_t count)
  750. {
  751. BUG_ON(direction != ITER_PIPE);
  752. i->type = direction;
  753. i->pipe = pipe;
  754. i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
  755. i->iov_offset = 0;
  756. i->count = count;
  757. }
  758. EXPORT_SYMBOL(iov_iter_pipe);
  759. unsigned long iov_iter_alignment(const struct iov_iter *i)
  760. {
  761. unsigned long res = 0;
  762. size_t size = i->count;
  763. if (!size)
  764. return 0;
  765. if (unlikely(i->type & ITER_PIPE)) {
  766. if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
  767. return size | i->iov_offset;
  768. return size;
  769. }
  770. iterate_all_kinds(i, size, v,
  771. (res |= (unsigned long)v.iov_base | v.iov_len, 0),
  772. res |= v.bv_offset | v.bv_len,
  773. res |= (unsigned long)v.iov_base | v.iov_len
  774. )
  775. return res;
  776. }
  777. EXPORT_SYMBOL(iov_iter_alignment);
  778. unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
  779. {
  780. unsigned long res = 0;
  781. size_t size = i->count;
  782. if (!size)
  783. return 0;
  784. if (unlikely(i->type & ITER_PIPE)) {
  785. WARN_ON(1);
  786. return ~0U;
  787. }
  788. iterate_all_kinds(i, size, v,
  789. (res |= (!res ? 0 : (unsigned long)v.iov_base) |
  790. (size != v.iov_len ? size : 0), 0),
  791. (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
  792. (size != v.bv_len ? size : 0)),
  793. (res |= (!res ? 0 : (unsigned long)v.iov_base) |
  794. (size != v.iov_len ? size : 0))
  795. );
  796. return res;
  797. }
  798. EXPORT_SYMBOL(iov_iter_gap_alignment);
  799. static inline size_t __pipe_get_pages(struct iov_iter *i,
  800. size_t maxsize,
  801. struct page **pages,
  802. int idx,
  803. size_t *start)
  804. {
  805. struct pipe_inode_info *pipe = i->pipe;
  806. ssize_t n = push_pipe(i, maxsize, &idx, start);
  807. if (!n)
  808. return -EFAULT;
  809. maxsize = n;
  810. n += *start;
  811. while (n > 0) {
  812. get_page(*pages++ = pipe->bufs[idx].page);
  813. idx = next_idx(idx, pipe);
  814. n -= PAGE_SIZE;
  815. }
  816. return maxsize;
  817. }
  818. static ssize_t pipe_get_pages(struct iov_iter *i,
  819. struct page **pages, size_t maxsize, unsigned maxpages,
  820. size_t *start)
  821. {
  822. unsigned npages;
  823. size_t capacity;
  824. int idx;
  825. if (!sanity(i))
  826. return -EFAULT;
  827. data_start(i, &idx, start);
  828. /* some of this one + all after this one */
  829. npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
  830. capacity = min(npages,maxpages) * PAGE_SIZE - *start;
  831. return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
  832. }
  833. ssize_t iov_iter_get_pages(struct iov_iter *i,
  834. struct page **pages, size_t maxsize, unsigned maxpages,
  835. size_t *start)
  836. {
  837. if (maxsize > i->count)
  838. maxsize = i->count;
  839. if (!maxsize)
  840. return 0;
  841. if (unlikely(i->type & ITER_PIPE))
  842. return pipe_get_pages(i, pages, maxsize, maxpages, start);
  843. iterate_all_kinds(i, maxsize, v, ({
  844. unsigned long addr = (unsigned long)v.iov_base;
  845. size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
  846. int n;
  847. int res;
  848. if (len > maxpages * PAGE_SIZE)
  849. len = maxpages * PAGE_SIZE;
  850. addr &= ~(PAGE_SIZE - 1);
  851. n = DIV_ROUND_UP(len, PAGE_SIZE);
  852. res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
  853. if (unlikely(res < 0))
  854. return res;
  855. return (res == n ? len : res * PAGE_SIZE) - *start;
  856. 0;}),({
  857. /* can't be more than PAGE_SIZE */
  858. *start = v.bv_offset;
  859. get_page(*pages = v.bv_page);
  860. return v.bv_len;
  861. }),({
  862. return -EFAULT;
  863. })
  864. )
  865. return 0;
  866. }
  867. EXPORT_SYMBOL(iov_iter_get_pages);
  868. static struct page **get_pages_array(size_t n)
  869. {
  870. struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
  871. if (!p)
  872. p = vmalloc(n * sizeof(struct page *));
  873. return p;
  874. }
  875. static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
  876. struct page ***pages, size_t maxsize,
  877. size_t *start)
  878. {
  879. struct page **p;
  880. size_t n;
  881. int idx;
  882. int npages;
  883. if (!sanity(i))
  884. return -EFAULT;
  885. data_start(i, &idx, start);
  886. /* some of this one + all after this one */
  887. npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
  888. n = npages * PAGE_SIZE - *start;
  889. if (maxsize > n)
  890. maxsize = n;
  891. else
  892. npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
  893. p = get_pages_array(npages);
  894. if (!p)
  895. return -ENOMEM;
  896. n = __pipe_get_pages(i, maxsize, p, idx, start);
  897. if (n > 0)
  898. *pages = p;
  899. else
  900. kvfree(p);
  901. return n;
  902. }
  903. ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
  904. struct page ***pages, size_t maxsize,
  905. size_t *start)
  906. {
  907. struct page **p;
  908. if (maxsize > i->count)
  909. maxsize = i->count;
  910. if (!maxsize)
  911. return 0;
  912. if (unlikely(i->type & ITER_PIPE))
  913. return pipe_get_pages_alloc(i, pages, maxsize, start);
  914. iterate_all_kinds(i, maxsize, v, ({
  915. unsigned long addr = (unsigned long)v.iov_base;
  916. size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
  917. int n;
  918. int res;
  919. addr &= ~(PAGE_SIZE - 1);
  920. n = DIV_ROUND_UP(len, PAGE_SIZE);
  921. p = get_pages_array(n);
  922. if (!p)
  923. return -ENOMEM;
  924. res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
  925. if (unlikely(res < 0)) {
  926. kvfree(p);
  927. return res;
  928. }
  929. *pages = p;
  930. return (res == n ? len : res * PAGE_SIZE) - *start;
  931. 0;}),({
  932. /* can't be more than PAGE_SIZE */
  933. *start = v.bv_offset;
  934. *pages = p = get_pages_array(1);
  935. if (!p)
  936. return -ENOMEM;
  937. get_page(*p = v.bv_page);
  938. return v.bv_len;
  939. }),({
  940. return -EFAULT;
  941. })
  942. )
  943. return 0;
  944. }
  945. EXPORT_SYMBOL(iov_iter_get_pages_alloc);
  946. size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
  947. struct iov_iter *i)
  948. {
  949. char *to = addr;
  950. __wsum sum, next;
  951. size_t off = 0;
  952. sum = *csum;
  953. if (unlikely(i->type & ITER_PIPE)) {
  954. WARN_ON(1);
  955. return 0;
  956. }
  957. iterate_and_advance(i, bytes, v, ({
  958. int err = 0;
  959. next = csum_and_copy_from_user(v.iov_base,
  960. (to += v.iov_len) - v.iov_len,
  961. v.iov_len, 0, &err);
  962. if (!err) {
  963. sum = csum_block_add(sum, next, off);
  964. off += v.iov_len;
  965. }
  966. err ? v.iov_len : 0;
  967. }), ({
  968. char *p = kmap_atomic(v.bv_page);
  969. next = csum_partial_copy_nocheck(p + v.bv_offset,
  970. (to += v.bv_len) - v.bv_len,
  971. v.bv_len, 0);
  972. kunmap_atomic(p);
  973. sum = csum_block_add(sum, next, off);
  974. off += v.bv_len;
  975. }),({
  976. next = csum_partial_copy_nocheck(v.iov_base,
  977. (to += v.iov_len) - v.iov_len,
  978. v.iov_len, 0);
  979. sum = csum_block_add(sum, next, off);
  980. off += v.iov_len;
  981. })
  982. )
  983. *csum = sum;
  984. return bytes;
  985. }
  986. EXPORT_SYMBOL(csum_and_copy_from_iter);
  987. bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
  988. struct iov_iter *i)
  989. {
  990. char *to = addr;
  991. __wsum sum, next;
  992. size_t off = 0;
  993. sum = *csum;
  994. if (unlikely(i->type & ITER_PIPE)) {
  995. WARN_ON(1);
  996. return false;
  997. }
  998. if (unlikely(i->count < bytes))
  999. return false;
  1000. iterate_all_kinds(i, bytes, v, ({
  1001. int err = 0;
  1002. next = csum_and_copy_from_user(v.iov_base,
  1003. (to += v.iov_len) - v.iov_len,
  1004. v.iov_len, 0, &err);
  1005. if (err)
  1006. return false;
  1007. sum = csum_block_add(sum, next, off);
  1008. off += v.iov_len;
  1009. 0;
  1010. }), ({
  1011. char *p = kmap_atomic(v.bv_page);
  1012. next = csum_partial_copy_nocheck(p + v.bv_offset,
  1013. (to += v.bv_len) - v.bv_len,
  1014. v.bv_len, 0);
  1015. kunmap_atomic(p);
  1016. sum = csum_block_add(sum, next, off);
  1017. off += v.bv_len;
  1018. }),({
  1019. next = csum_partial_copy_nocheck(v.iov_base,
  1020. (to += v.iov_len) - v.iov_len,
  1021. v.iov_len, 0);
  1022. sum = csum_block_add(sum, next, off);
  1023. off += v.iov_len;
  1024. })
  1025. )
  1026. *csum = sum;
  1027. iov_iter_advance(i, bytes);
  1028. return true;
  1029. }
  1030. EXPORT_SYMBOL(csum_and_copy_from_iter_full);
  1031. size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
  1032. struct iov_iter *i)
  1033. {
  1034. const char *from = addr;
  1035. __wsum sum, next;
  1036. size_t off = 0;
  1037. sum = *csum;
  1038. if (unlikely(i->type & ITER_PIPE)) {
  1039. WARN_ON(1); /* for now */
  1040. return 0;
  1041. }
  1042. iterate_and_advance(i, bytes, v, ({
  1043. int err = 0;
  1044. next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
  1045. v.iov_base,
  1046. v.iov_len, 0, &err);
  1047. if (!err) {
  1048. sum = csum_block_add(sum, next, off);
  1049. off += v.iov_len;
  1050. }
  1051. err ? v.iov_len : 0;
  1052. }), ({
  1053. char *p = kmap_atomic(v.bv_page);
  1054. next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
  1055. p + v.bv_offset,
  1056. v.bv_len, 0);
  1057. kunmap_atomic(p);
  1058. sum = csum_block_add(sum, next, off);
  1059. off += v.bv_len;
  1060. }),({
  1061. next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
  1062. v.iov_base,
  1063. v.iov_len, 0);
  1064. sum = csum_block_add(sum, next, off);
  1065. off += v.iov_len;
  1066. })
  1067. )
  1068. *csum = sum;
  1069. return bytes;
  1070. }
  1071. EXPORT_SYMBOL(csum_and_copy_to_iter);
  1072. int iov_iter_npages(const struct iov_iter *i, int maxpages)
  1073. {
  1074. size_t size = i->count;
  1075. int npages = 0;
  1076. if (!size)
  1077. return 0;
  1078. if (unlikely(i->type & ITER_PIPE)) {
  1079. struct pipe_inode_info *pipe = i->pipe;
  1080. size_t off;
  1081. int idx;
  1082. if (!sanity(i))
  1083. return 0;
  1084. data_start(i, &idx, &off);
  1085. /* some of this one + all after this one */
  1086. npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
  1087. if (npages >= maxpages)
  1088. return maxpages;
  1089. } else iterate_all_kinds(i, size, v, ({
  1090. unsigned long p = (unsigned long)v.iov_base;
  1091. npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
  1092. - p / PAGE_SIZE;
  1093. if (npages >= maxpages)
  1094. return maxpages;
  1095. 0;}),({
  1096. npages++;
  1097. if (npages >= maxpages)
  1098. return maxpages;
  1099. }),({
  1100. unsigned long p = (unsigned long)v.iov_base;
  1101. npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
  1102. - p / PAGE_SIZE;
  1103. if (npages >= maxpages)
  1104. return maxpages;
  1105. })
  1106. )
  1107. return npages;
  1108. }
  1109. EXPORT_SYMBOL(iov_iter_npages);
  1110. const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
  1111. {
  1112. *new = *old;
  1113. if (unlikely(new->type & ITER_PIPE)) {
  1114. WARN_ON(1);
  1115. return NULL;
  1116. }
  1117. if (new->type & ITER_BVEC)
  1118. return new->bvec = kmemdup(new->bvec,
  1119. new->nr_segs * sizeof(struct bio_vec),
  1120. flags);
  1121. else
  1122. /* iovec and kvec have identical layout */
  1123. return new->iov = kmemdup(new->iov,
  1124. new->nr_segs * sizeof(struct iovec),
  1125. flags);
  1126. }
  1127. EXPORT_SYMBOL(dup_iter);
  1128. /**
  1129. * import_iovec() - Copy an array of &struct iovec from userspace
  1130. * into the kernel, check that it is valid, and initialize a new
  1131. * &struct iov_iter iterator to access it.
  1132. *
  1133. * @type: One of %READ or %WRITE.
  1134. * @uvector: Pointer to the userspace array.
  1135. * @nr_segs: Number of elements in userspace array.
  1136. * @fast_segs: Number of elements in @iov.
  1137. * @iov: (input and output parameter) Pointer to pointer to (usually small
  1138. * on-stack) kernel array.
  1139. * @i: Pointer to iterator that will be initialized on success.
  1140. *
  1141. * If the array pointed to by *@iov is large enough to hold all @nr_segs,
  1142. * then this function places %NULL in *@iov on return. Otherwise, a new
  1143. * array will be allocated and the result placed in *@iov. This means that
  1144. * the caller may call kfree() on *@iov regardless of whether the small
  1145. * on-stack array was used or not (and regardless of whether this function
  1146. * returns an error or not).
  1147. *
  1148. * Return: 0 on success or negative error code on error.
  1149. */
  1150. int import_iovec(int type, const struct iovec __user * uvector,
  1151. unsigned nr_segs, unsigned fast_segs,
  1152. struct iovec **iov, struct iov_iter *i)
  1153. {
  1154. ssize_t n;
  1155. struct iovec *p;
  1156. n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
  1157. *iov, &p);
  1158. if (n < 0) {
  1159. if (p != *iov)
  1160. kfree(p);
  1161. *iov = NULL;
  1162. return n;
  1163. }
  1164. iov_iter_init(i, type, p, nr_segs, n);
  1165. *iov = p == *iov ? NULL : p;
  1166. return 0;
  1167. }
  1168. EXPORT_SYMBOL(import_iovec);
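/*
 * Illustrative sketch (added to this listing, not part of the kernel
 * source): the calling convention described in the comment above, as a
 * readv()-style helper.  Here the iterator is simply zero-filled; a real
 * caller would hand it to a ->read_iter()-style consumer instead.
 * kfree(iov) is safe whether or not the on-stack array was used, because
 * *iov is set to NULL in the on-stack case.
 */
static ssize_t example_zero_readv(const struct iovec __user *uvec,
				  unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = iov_iter_zero(iov_iter_count(&iter), &iter);
	kfree(iov);
	return ret;
}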
  1169. #ifdef CONFIG_COMPAT
  1170. #include <linux/compat.h>
  1171. int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
  1172. unsigned nr_segs, unsigned fast_segs,
  1173. struct iovec **iov, struct iov_iter *i)
  1174. {
  1175. ssize_t n;
  1176. struct iovec *p;
  1177. n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
  1178. *iov, &p);
  1179. if (n < 0) {
  1180. if (p != *iov)
  1181. kfree(p);
  1182. *iov = NULL;
  1183. return n;
  1184. }
  1185. iov_iter_init(i, type, p, nr_segs, n);
  1186. *iov = p == *iov ? NULL : p;
  1187. return 0;
  1188. }
  1189. #endif
  1190. int import_single_range(int rw, void __user *buf, size_t len,
  1191. struct iovec *iov, struct iov_iter *i)
  1192. {
  1193. if (len > MAX_RW_COUNT)
  1194. len = MAX_RW_COUNT;
  1195. if (unlikely(!access_ok(!rw, buf, len)))
  1196. return -EFAULT;
  1197. iov->iov_base = buf;
  1198. iov->iov_len = len;
  1199. iov_iter_init(i, rw, iov, 1, len);
  1200. return 0;
  1201. }
  1202. EXPORT_SYMBOL(import_single_range);