
/*
 * linux/fs/read_write.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/aio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include "internal.h"

#include <asm/uaccess.h>
#include <asm/unistd.h>

typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
		unsigned long, loff_t);
typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/**
 * vfs_setpos - update the file offset for lseek
 * @file:	file structure in question
 * @offset:	file offset to seek to
 * @maxsize:	maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	return offset;
}
EXPORT_SYMBOL(vfs_setpos);
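
/*
 * Editorial sketch (not part of this file): a driver that knows its own
 * size could build ->llseek on top of vfs_setpos().  "mydev_size()" is a
 * hypothetical helper returning the device's current size.
 *
 *	static loff_t mydev_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		loff_t size = mydev_size(file);
 *
 *		switch (whence) {
 *		case SEEK_SET:
 *			break;
 *		case SEEK_CUR:
 *			offset += file->f_pos;
 *			break;
 *		case SEEK_END:
 *			offset += size;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return vfs_setpos(file, offset, size);
 *	}
 */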

/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @whence:	type of seek
 * @maxsize:	max size of this file in file system
 * @eof:	offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, for e.g. hashed directories.
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
		loff_t maxsize, loff_t eof)
{
	switch (whence) {
	case SEEK_END:
		offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0)
			return file->f_pos;
		/*
		 * f_lock protects against read/modify/write race with other
		 * SEEK_CURs. Note that parallel writes and reads behave
		 * like SEEK_SET.
		 */
		spin_lock(&file->f_lock);
		offset = vfs_setpos(file, file->f_pos + offset, maxsize);
		spin_unlock(&file->f_lock);
		return offset;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if (offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if (offset >= eof)
			return -ENXIO;
		offset = eof;
		break;
	}

	return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
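
/*
 * Editorial sketch (not part of this file): a filesystem with hashed
 * directories might seek against a cooked "EOF" rather than i_size, as the
 * kernel-doc above suggests.  MYFS_MAX_DIR_OFFSET and myfs_dir_eof() are
 * hypothetical.
 *
 *	static loff_t myfs_dir_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return generic_file_llseek_size(file, offset, whence,
 *						MYFS_MAX_DIR_OFFSET,
 *						myfs_dir_eof(inode));
 *	}
 */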

/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @whence:	type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems.  It just updates the file offset to the value specified by
 * @offset and @whence.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);

/**
 * fixed_size_llseek - llseek implementation for fixed-sized devices
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @whence:	type of seek
 * @size:	size of the file
 */
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR: case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						size, size);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(fixed_size_llseek);
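
/*
 * Editorial sketch (not part of this file): a character device that exposes
 * a fixed-size region (an EEPROM, say) can hand seeking straight to
 * fixed_size_llseek().  MYDEV_REGION_SIZE is hypothetical.
 *
 *	static loff_t mydev_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		return fixed_size_llseek(file, offset, whence, MYDEV_REGION_SIZE);
 *	}
 */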

/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @whence:	type of seek
 *
 * This is an implementation of ->llseek usable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);

loff_t no_llseek(struct file *file, loff_t offset, int whence)
{
	return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);
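
/*
 * Editorial sketch (not part of this file): a stream-like character device
 * would typically wire up no_llseek so that lseek() fails with -ESPIPE, or
 * noop_llseek if old userspace expects the call to "succeed" without moving
 * the position.  mystream_read is hypothetical.
 *
 *	static const struct file_operations mystream_fops = {
 *		.owner	= THIS_MODULE,
 *		.read	= mystream_read,
 *		.llseek	= no_llseek,
 *	};
 */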

loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case SEEK_END:
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			retval = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as
		 * long as offset isn't at the end of the file then the
		 * offset is data.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so
		 * as long as offset isn't i_size or larger, return
		 * i_size.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}
	retval = -EINVAL;
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
EXPORT_SYMBOL(default_llseek);

loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t (*fn)(struct file *, loff_t, int);

	fn = no_llseek;
	if (file->f_mode & FMODE_LSEEK) {
		if (file->f_op->llseek)
			fn = file->f_op->llseek;
	}
	return fn(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);

static inline struct fd fdget_pos(int fd)
{
	return __to_fd(__fdget_pos(fd));
}

static inline void fdput_pos(struct fd f)
{
	if (f.flags & FDPUT_POS_UNLOCK)
		mutex_unlock(&f.file->f_pos_lock);
	fdput(f);
}

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	off_t retval;
	struct fd f = fdget_pos(fd);

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(f.file, offset, whence);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fdput_pos(f);
	return retval;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return sys_lseek(fd, offset, whence);
}
#endif

#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	struct fd f = fdget_pos(fd);
	loff_t offset;

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence > SEEK_MAX)
		goto out_putf;

	offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
			whence);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fdput_pos(f);
	return retval;
}
#endif

/*
 * rw_verify_area doesn't like huge counts. We limit
 * them to something that fits in "int" so that others
 * won't have to do range checks all the time.
 */
int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
	struct inode *inode;
	loff_t pos;
	int retval = -EINVAL;

	inode = file_inode(file);
	if (unlikely((ssize_t) count < 0))
		return retval;
	pos = *ppos;
	if (unlikely(pos < 0)) {
		if (!unsigned_offsets(file))
			return retval;
		if (count >= -pos) /* both values are in 0..LLONG_MAX */
			return -EOVERFLOW;
	} else if (unlikely((loff_t) (pos + count) < 0)) {
		if (!unsigned_offsets(file))
			return retval;
	}

	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
		retval = locks_mandatory_area(
			read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
			inode, file, pos, count);
		if (retval < 0)
			return retval;
	}
	retval = security_file_permission(file,
				read_write == READ ? MAY_READ : MAY_WRITE);
	if (retval)
		return retval;
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}

ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_read);

ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;
	iov_iter_init(&iter, READ, &iov, 1, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_read);

ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->read)
			ret = file->f_op->read(file, buf, count, pos);
		else if (file->f_op->aio_read)
			ret = do_sync_read(file, buf, count, pos);
		else
			ret = new_sync_read(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_access(file);
			add_rchar(current, ret);
		}
		inc_syscr(current);
	}

	return ret;
}
EXPORT_SYMBOL(vfs_read);

ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_write);

ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else if (file->f_op->aio_write)
		ret = do_sync_write(file, p, count, pos);
	else
		ret = new_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
EXPORT_SYMBOL(__kernel_write);
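
/*
 * Editorial sketch (not part of this file): __kernel_write() lets kernel
 * code push a kernel-space buffer through ->write paths that expect a
 * __user pointer, temporarily widening the address limit as above.  The
 * caller below, which appends a fixed marker to a file it already holds
 * open for writing, is hypothetical.
 *
 *	static void write_marker(struct file *file)
 *	{
 *		loff_t pos = file->f_pos;
 *
 *		if (__kernel_write(file, "marker\n", 7, &pos) > 0)
 *			file->f_pos = pos;
 *	}
 */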

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		file_start_write(file);
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else if (file->f_op->aio_write)
			ret = do_sync_write(file, buf, count, pos);
		else
			ret = new_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
EXPORT_SYMBOL(vfs_write);

static inline loff_t file_pos_read(struct file *file)
{
	return file->f_pos;
}

static inline void file_pos_write(struct file *file, loff_t pos)
{
	file->f_pos = pos;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_read(f.file, buf, count, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}
	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_write(f.file, buf, count, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
		size_t, count, loff_t, pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_read(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
		size_t, count, loff_t, pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_write(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}

/*
 * Reduce an iovec's length in-place.  Return the resulting number of segments
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	unsigned long seg = 0;
	size_t len = 0;

	while (seg < nr_segs) {
		seg++;
		if (len + iov->iov_len >= to) {
			iov->iov_len = to - len;
			break;
		}
		len += iov->iov_len;
		iov++;
	}
	return seg;
}
EXPORT_SYMBOL(iov_shorten);
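
/*
 * Editorial example (not part of this file): with two 100-byte segments and
 * a limit of 150, iov_shorten() trims the second segment in place and
 * reports both segments as still in use.  buf0 and buf1 stand for any
 * buffers.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = 100 },
 *		{ .iov_base = buf1, .iov_len = 100 },
 *	};
 *	unsigned long segs = iov_shorten(iov, 2, 150);
 *
 * Afterwards segs == 2 and iov[1].iov_len == 50; a limit of 80 would instead
 * give segs == 1 with iov[0].iov_len == 80.
 */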

static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	iov_iter_init(&iter, rw, iov, nr_segs, len);
	ret = fn(&kiocb, &iter);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
{
	struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		nr = fn(filp, base, len, ppos);
		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}

/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)

ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_pointer,
			      struct iovec **ret_pointer)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto out;
		}
		if (type >= 0
		    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}
		if (len > MAX_RW_COUNT - ret) {
			len = MAX_RW_COUNT - ret;
			iov[seg].iov_len = len;
		}
		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}

static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	ret = rw_copy_check_uvector(type, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
						pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
		  unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	return do_readv_writev(READ, file, vec, vlen, pos);
}
EXPORT_SYMBOL(vfs_readv);

ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
		   unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	return do_readv_writev(WRITE, file, vec, vlen, pos);
}
EXPORT_SYMBOL(vfs_writev);

SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_readv(f.file, vec, vlen, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_writev(f.file, vec, vlen, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}
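
/*
 * Editorial note on pos_from_hilo() above: the two half-word shifts are
 * equivalent to "high << BITS_PER_LONG" without the undefined behaviour of
 * shifting a 64-bit value by 64 bits.  On a 32-bit kernel
 * (BITS_PER_LONG == 32) the result is (high << 32) | low, e.g. high = 0x1
 * and low = 0x2 reassemble to 0x100000002; on a 64-bit kernel the high word
 * is shifted out entirely and low already carries the full offset.
 */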

SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_readv(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

#ifdef CONFIG_COMPAT

static ssize_t compat_do_readv_writev(int type, struct file *file,
			       const struct compat_iovec __user *uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	compat_ssize_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
					   UIO_FASTIOV, iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
						pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

static size_t compat_readv(struct file *file,
			   const struct compat_iovec __user *vec,
			   unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_READ))
		goto out;

	ret = -EINVAL;
	if (!(file->f_mode & FMODE_CAN_READ))
		goto out;

	ret = compat_do_readv_writev(READ, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
		const struct compat_iovec __user *,vec,
		compat_ulong_t, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret;
	loff_t pos;

	if (!f.file)
		return -EBADF;
	pos = f.file->f_pos;
	ret = compat_readv(f.file, vec, vlen, &pos);
	if (ret >= 0)
		f.file->f_pos = pos;
	fdput_pos(f);
	return ret;
}

static long __compat_sys_preadv64(unsigned long fd,
				  const struct compat_iovec __user *vec,
				  unsigned long vlen, loff_t pos)
{
	struct fd f;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	f = fdget(fd);
	if (!f.file)
		return -EBADF;
	ret = -ESPIPE;
	if (f.file->f_mode & FMODE_PREAD)
		ret = compat_readv(f.file, vec, vlen, &pos);
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
		const struct compat_iovec __user *,vec,
		unsigned long, vlen, loff_t, pos)
{
	return __compat_sys_preadv64(fd, vec, vlen, pos);
}
#endif

COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
		const struct compat_iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return __compat_sys_preadv64(fd, vec, vlen, pos);
}

static size_t compat_writev(struct file *file,
			    const struct compat_iovec __user *vec,
			    unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_WRITE))
		goto out;

	ret = -EINVAL;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		goto out;

	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
		const struct compat_iovec __user *, vec,
		compat_ulong_t, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret;
	loff_t pos;

	if (!f.file)
		return -EBADF;
	pos = f.file->f_pos;
	ret = compat_writev(f.file, vec, vlen, &pos);
	if (ret >= 0)
		f.file->f_pos = pos;
	fdput_pos(f);
	return ret;
}

static long __compat_sys_pwritev64(unsigned long fd,
				   const struct compat_iovec __user *vec,
				   unsigned long vlen, loff_t pos)
{
	struct fd f;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	f = fdget(fd);
	if (!f.file)
		return -EBADF;
	ret = -ESPIPE;
	if (f.file->f_mode & FMODE_PWRITE)
		ret = compat_writev(f.file, vec, vlen, &pos);
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
		const struct compat_iovec __user *,vec,
		unsigned long, vlen, loff_t, pos)
{
	return __compat_sys_pwritev64(fd, vec, vlen, pos);
}
#endif

COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
		const struct compat_iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return __compat_sys_pwritev64(fd, vec, vlen, pos);
}
#endif

static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	loff_t out_pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!in.file)
		goto out;
	if (!(in.file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos) {
		pos = in.file->f_pos;
	} else {
		pos = *ppos;
		if (!(in.file->f_mode & FMODE_PREAD))
			goto fput_in;
	}
	retval = rw_verify_area(READ, in.file, &pos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!out.file)
		goto fput_in;
	if (!(out.file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = file_inode(in.file);
	out_inode = file_inode(out.file);
	out_pos = out.file->f_pos;
	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in.file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	file_start_write(out.file);
	retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
	file_end_write(out.file);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(in.file);
		fsnotify_modify(out.file);
		out.file->f_pos = out_pos;
		if (ppos)
			*ppos = pos;
		else
			in.file->f_pos = pos;
	}

	inc_syscr(current);
	inc_syscw(current);
	if (pos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}

SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
		compat_off_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
		compat_loff_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#endif