splice.c

/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and fixing the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
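
/*
 * Rough userspace usage sketch (illustration only, not part of this
 * file): assuming a splice() wrapper that matches the sys_splice()
 * prototype defined at the bottom of this file, a file can be pushed
 * to a socket through a pipe without copying the data through user
 * memory. The wrapper name and its syscall wiring are assumptions here:
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *
 *	// file -> pipe: fill pipe buffers with up to 64k of file data
 *	long n = splice(file_fd, pfd[1], 65536, SPLICE_F_MOVE);
 *
 *	// pipe -> socket: hand those buffers to the socket via sendpage
 *	if (n > 0)
 *		splice(pfd[0], sock_fd, n, SPLICE_F_MOVE);
 */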
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	lock_page(page);

	if (!PageUptodate(page)) {
		unlock_page(page);
		return ERR_PTR(-EIO);
	}

	if (!page->mapping) {
		unlock_page(page);
		return ERR_PTR(-ENODATA);
	}

	return kmap(buf->page);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	unlock_page(buf->page);
	kunmap(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs;

		if (!PIPE_READERS(*inode)) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		bufs = info->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = info->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			info->nrbufs = ++bufs;
			do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (bufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
				    POLL_IN);
			do_wakeup = 0;
		}

		PIPE_WAITING_WRITERS(*inode)++;
		pipe_wait(inode);
		PIPE_WAITING_WRITERS(*inode)--;
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
	}

	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}

static int __generic_file_splice_read(struct file *in, struct inode *pipe,
				      size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, pidx;
	int i, j;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * initiate read-ahead on this page range
	 */
	do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Get as many pages from the page cache as possible..
	 * Start IO on the page cache entries we create (we
	 * can assume that any pre-existing ones we find have
	 * already had IO started on them).
	 */
	i = find_get_pages(mapping, index, nr_pages, pages);

	/*
	 * common case - we found all pages and they are contiguous,
	 * kick them off
	 */
	if (i && (pages[i - 1]->index == index + i - 1))
		goto splice_them;

	/*
	 * fill shadow[] with pages at the right locations, so we only
	 * have to fill holes
	 */
	memset(shadow, 0, nr_pages * sizeof(struct page *));
	for (j = 0; j < i; j++)
		shadow[pages[j]->index - index] = pages[j];

	/*
	 * now fill in the holes
	 */
	for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
		int error;

		if (shadow[i])
			continue;

		/*
		 * no page there, look one up / create it
		 */
		page = find_or_create_page(mapping, pidx,
					   mapping_gfp_mask(mapping));
		if (!page)
			break;

		if (PageUptodate(page))
			unlock_page(page);
		else {
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}
		}
		shadow[i] = page;
	}

	if (!i) {
		for (i = 0; i < nr_pages; i++) {
			if (shadow[i])
				page_cache_release(shadow[i]);
		}

		return 0;
	}

	memcpy(pages, shadow, i * sizeof(struct page *));

	/*
	 * Now we splice them into the pipe..
	 */
splice_them:
	return move_to_pipe(pipe, pages, i, offset, len, flags);
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 *
 */
ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
				 size_t len, unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;
	while (len) {
		ret = __generic_file_splice_read(in, pipe, len, flags);

		if (ret <= 0)
			break;

		in->f_pos += ret;
		len -= ret;
		spliced += ret;

		if (!(flags & SPLICE_F_NONBLOCK))
			continue;
		ret = -EAGAIN;
		break;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * sub-optimal, but we are limited by the pipe ->map. we don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	if (ret == sd->len)
		return 0;

	return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * after this, page will be locked and unmapped
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/*
	 * reuse buf page, if SPLICE_F_MOVE is set
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			goto out;

		/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * page got invalidated, repeat
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else {
				WARN_ON(!PageLocked(page));
				SetPageUptodate(page);
			}
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, sd->len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		page_cache_release(page);
		unlock_page(page);
	}
	buf->ops->unmap(info, buf);
	return ret;
}

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;

		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(info, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (bufs)
			continue;
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(inode);
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}

	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @inode: pipe inode
 * @out: file to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
				  size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);

	/*
	 * if file or inode is SYNC and we actually wrote some data, sync it
	 */
	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
	    && ret > 0) {
		struct inode *inode = mapping->host;
		int err;

		mutex_lock(&inode->i_mutex);
		err = generic_osync_inode(mapping->host, mapping,
					  OSYNC_METADATA|OSYNC_DATA);
		mutex_unlock(&inode->i_mutex);

		if (err)
			ret = err;
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @inode: pipe inode
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
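
/*
 * A filesystem opts into splice by pointing its file_operations at the
 * exported helpers above. A minimal sketch (the "examplefs" name below
 * is hypothetical; only the ->splice_read/->splice_write hooks and the
 * generic_file_splice_read/write helpers come from this file):
 *
 *	static struct file_operations examplefs_file_operations = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 *
 * do_splice_to() and do_splice_from() below dispatch through these
 * methods for the non-pipe side of a splice() call.
 */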

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
			   unsigned int flags)
{
	loff_t pos;
	int ret;

	if (!out->f_op || !out->f_op->splice_write)
		return -EINVAL;

	if (!(out->f_mode & FMODE_WRITE))
		return -EBADF;

	pos = out->f_pos;
	ret = rw_verify_area(WRITE, out, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
			 unsigned int flags)
{
	loff_t pos, isize, left;
	int ret;

	if (!in->f_op || !in->f_op->splice_read)
		return -EINVAL;

	if (!(in->f_mode & FMODE_READ))
		return -EBADF;

	pos = in->f_pos;
	ret = rw_verify_area(READ, in, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(in->f_pos >= isize))
		return 0;

	left = isize - in->f_pos;
	if (left < len)
		len = left;

	return in->f_op->splice_read(in, pipe, len, flags);
}

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct inode *pipe;

	pipe = in->f_dentry->d_inode;
	if (pipe->i_pipe)
		return do_splice_from(pipe, out, len, flags);

	pipe = out->f_dentry->d_inode;
	if (pipe->i_pipe)
		return do_splice_to(in, pipe, len, flags);

	return -EINVAL;
}

asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fdout, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, out, len, flags);

				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}