read_write.c

/*
 *  linux/fs/read_write.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/aio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include "internal.h"

#include <asm/uaccess.h>
#include <asm/unistd.h>

typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
		unsigned long, loff_t);
typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/**
 * vfs_setpos - update the file offset for lseek
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	return offset;
}
EXPORT_SYMBOL(vfs_setpos);
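
/*
 * Illustrative sketch (not part of this file): a driver backing a
 * fixed-size region could build its ->llseek on top of vfs_setpos()
 * roughly like this, where MYDEV_SIZE is a hypothetical device size:
 *
 *	static loff_t mydev_llseek(struct file *file, loff_t off, int whence)
 *	{
 *		switch (whence) {
 *		case SEEK_SET:	break;
 *		case SEEK_CUR:	off += file->f_pos;	break;
 *		case SEEK_END:	off += MYDEV_SIZE;	break;
 *		default:	return -EINVAL;
 *		}
 *		return vfs_setpos(file, off, MYDEV_SIZE);
 *	}
 *
 * In practice such a driver could simply use fixed_size_llseek() below,
 * which provides the same behaviour.
 */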

/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @maxsize: max size of this file in file system
 * @eof: offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, e.g. for hashed directories
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
		loff_t maxsize, loff_t eof)
{
	switch (whence) {
	case SEEK_END:
		offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0)
			return file->f_pos;
		/*
		 * f_lock protects against read/modify/write race with other
		 * SEEK_CURs. Note that parallel writes and reads behave
		 * like SEEK_SET.
		 */
		spin_lock(&file->f_lock);
		offset = vfs_setpos(file, file->f_pos + offset, maxsize);
		spin_unlock(&file->f_lock);
		return offset;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if (offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if (offset >= eof)
			return -ENXIO;
		offset = eof;
		break;
	}

	return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
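
/*
 * Illustrative sketch (not from this file): a filesystem that wants
 * SEEK_END to land on a per-file "logical" end rather than i_size can
 * call this helper from its own ->llseek, for example:
 *
 *	static loff_t myfs_dir_llseek(struct file *file, loff_t off, int whence)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return generic_file_llseek_size(file, off, whence,
 *						inode->i_sb->s_maxbytes,
 *						myfs_dir_end(inode));
 *	}
 *
 * where myfs_dir_end() stands for a hypothetical per-filesystem helper
 * that computes the directory's logical end offset.
 */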

/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems.  It just updates the file offset to the value specified by
 * @offset and @whence.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);

/**
 * fixed_size_llseek - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @size: size of the file
 *
 */
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR: case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						size, size);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(fixed_size_llseek);

/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is an implementation of ->llseek usable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);

loff_t no_llseek(struct file *file, loff_t offset, int whence)
{
	return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);
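
/*
 * Picking one of the stubs above for a file_operations table:
 * noop_llseek() reports success and leaves f_pos untouched, for devices
 * where userspace insists on calling lseek() even though seeking is
 * meaningless; no_llseek() fails with -ESPIPE for genuinely unseekable
 * files.  Illustrative sketch with hypothetical handlers (swap in
 * no_llseek for the -ESPIPE behaviour):
 *
 *	static const struct file_operations mydev_fops = {
 *		.llseek	= noop_llseek,
 *		.read	= mydev_read,
 *		.write	= mydev_write,
 *	};
 */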

loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case SEEK_END:
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			retval = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as
		 * long as offset isn't at the end of the file then the
		 * offset is data.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so
		 * as long as offset isn't i_size or larger, return
		 * i_size.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}
	retval = -EINVAL;
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
EXPORT_SYMBOL(default_llseek);

loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t (*fn)(struct file *, loff_t, int);

	fn = no_llseek;
	if (file->f_mode & FMODE_LSEEK) {
		if (file->f_op->llseek)
			fn = file->f_op->llseek;
	}
	return fn(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);

static inline struct fd fdget_pos(int fd)
{
	return __to_fd(__fdget_pos(fd));
}

static inline void fdput_pos(struct fd f)
{
	if (f.flags & FDPUT_POS_UNLOCK)
		mutex_unlock(&f.file->f_pos_lock);
	fdput(f);
}

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	off_t retval;
	struct fd f = fdget_pos(fd);

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(f.file, offset, whence);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fdput_pos(f);
	return retval;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return sys_lseek(fd, offset, whence);
}
#endif

#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	struct fd f = fdget_pos(fd);
	loff_t offset;

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence > SEEK_MAX)
		goto out_putf;

	offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
			whence);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fdput_pos(f);
	return retval;
}
#endif

/*
 * rw_verify_area doesn't like huge counts. We limit
 * them to something that fits in "int" so that others
 * won't have to do range checks all the time.
 */
int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
	struct inode *inode;
	loff_t pos;
	int retval = -EINVAL;

	inode = file_inode(file);
	if (unlikely((ssize_t) count < 0))
		return retval;
	pos = *ppos;
	if (unlikely(pos < 0)) {
		if (!unsigned_offsets(file))
			return retval;
		if (count >= -pos) /* both values are in 0..LLONG_MAX */
			return -EOVERFLOW;
	} else if (unlikely((loff_t) (pos + count) < 0)) {
		if (!unsigned_offsets(file))
			return retval;
	}

	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
		retval = locks_mandatory_area(
			read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
			inode, file, pos, count);
		if (retval < 0)
			return retval;
	}
	retval = security_file_permission(file,
				read_write == READ ? MAY_READ : MAY_WRITE);
	if (retval)
		return retval;
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}
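
/*
 * Note on the helper above: on success rw_verify_area() returns the
 * byte count to use, clamped to MAX_RW_COUNT, and on failure a negative
 * errno, so callers follow the pattern
 *
 *	ret = rw_verify_area(READ, file, pos, count);
 *	if (ret >= 0) {
 *		count = ret;
 *		...
 *	}
 *
 * as vfs_read() and vfs_write() below do.
 */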

ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_read);

ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;
	iov_iter_init(&iter, READ, &iov, 1, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_read);
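
/*
 * vfs_read() below tries ->read first, then falls back to
 * do_sync_read() for ->aio_read based drivers and finally to
 * new_sync_read() for ->read_iter based ones.  A filesystem built
 * around the iter interface therefore typically wires things up like
 * generic_ro_fops at the top of this file; an illustrative read/write
 * variant (names other than the generic helpers are hypothetical):
 *
 *	const struct file_operations myfs_file_ops = {
 *		.llseek		= generic_file_llseek,
 *		.read		= new_sync_read,
 *		.write		= new_sync_write,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *	};
 */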

ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->read)
			ret = file->f_op->read(file, buf, count, pos);
		else if (file->f_op->aio_read)
			ret = do_sync_read(file, buf, count, pos);
		else
			ret = new_sync_read(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_access(file);
			add_rchar(current, ret);
		}
		inc_syscr(current);
	}

	return ret;
}
EXPORT_SYMBOL(vfs_read);

ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_write);
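
/*
 * __kernel_write() below writes from a kernel buffer: it temporarily
 * lifts the user address limit with set_fs(get_ds()) so that a kernel
 * pointer passes the __user checks in the normal write paths, restores
 * the old limit afterwards, and deliberately skips rw_verify_area()
 * (the count is still clamped to MAX_RW_COUNT).  Its callers are
 * trusted in-kernel users, not the syscall paths.
 */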
ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else if (file->f_op->aio_write)
		ret = do_sync_write(file, p, count, pos);
	else
		ret = new_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		file_start_write(file);
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else if (file->f_op->aio_write)
			ret = do_sync_write(file, buf, count, pos);
		else
			ret = new_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
EXPORT_SYMBOL(vfs_write);

static inline loff_t file_pos_read(struct file *file)
{
	return file->f_pos;
}

static inline void file_pos_write(struct file *file, loff_t pos)
{
	file->f_pos = pos;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_read(f.file, buf, count, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}
	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_write(f.file, buf, count, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
		size_t, count, loff_t, pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_read(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
		size_t, count, loff_t, pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_write(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}

/*
 * Reduce an iovec's length in-place.  Return the resulting number of segments
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	unsigned long seg = 0;
	size_t len = 0;

	while (seg < nr_segs) {
		seg++;
		if (len + iov->iov_len >= to) {
			iov->iov_len = to - len;
			break;
		}
		len += iov->iov_len;
		iov++;
	}
	return seg;
}
EXPORT_SYMBOL(iov_shorten);
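
/*
 * Worked example for iov_shorten(): with three segments of 100, 200 and
 * 300 bytes and to == 250, segment 0 is kept as is, segment 1 is
 * truncated in place to 150 bytes, the third segment is never touched,
 * and the function returns 2 (the number of segments that now cover
 * exactly 250 bytes).
 */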

static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	iov_iter_init(&iter, rw, iov, nr_segs, len);
	ret = fn(&kiocb, &iter);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
{
	struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		nr = fn(filp, base, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}

/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)

ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_pointer,
			      struct iovec **ret_pointer)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto out;
		}
		if (type >= 0
		    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}
		if (len > MAX_RW_COUNT - ret) {
			len = MAX_RW_COUNT - ret;
			iov[seg].iov_len = len;
		}
		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}
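
/*
 * Contract of rw_copy_check_uvector(): on success the return value is
 * the total number of bytes described by the copied iovec array (with
 * individual lengths clamped so the sum never exceeds MAX_RW_COUNT),
 * and *ret_pointer is set to either fast_pointer or a kmalloc()ed
 * array.  On failure a negative errno is returned and *ret_pointer is
 * still assigned, so the caller's kfree() cleanup stays safe either
 * way; do_readv_writev() below frees it whenever it differs from the
 * on-stack iovstack.
 */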

static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	ret = rw_copy_check_uvector(type, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
						pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
		  unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	return do_readv_writev(READ, file, vec, vlen, pos);
}
EXPORT_SYMBOL(vfs_readv);

ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
		   unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	return do_readv_writev(WRITE, file, vec, vlen, pos);
}
EXPORT_SYMBOL(vfs_writev);

SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_readv(f.file, vec, vlen, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_writev(f.file, vec, vlen, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}
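
/*
 * The double shift in pos_from_hilo() is deliberate.  On a 64-bit
 * kernel HALF_LONG_BITS is 32, so a single shift by BITS_PER_LONG would
 * be undefined behaviour; shifting twice by 32 instead discards "high"
 * entirely, which is fine because the full offset is expected in "low"
 * there.  On a 32-bit kernel (HALF_LONG_BITS == 16) the two shifts
 * compose the halves: high == 1, low == 0 gives
 * ((loff_t)1 << 16) << 16 == 0x100000000, i.e. an offset of 4 GiB.
 */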

SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_readv(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

#ifdef CONFIG_COMPAT

static ssize_t compat_do_readv_writev(int type, struct file *file,
			       const struct compat_iovec __user *uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	compat_ssize_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
					   UIO_FASTIOV, iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
						pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

static ssize_t compat_readv(struct file *file,
			    const struct compat_iovec __user *vec,
			    unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_READ))
		goto out;

	ret = -EINVAL;
	if (!(file->f_mode & FMODE_CAN_READ))
		goto out;

	ret = compat_do_readv_writev(READ, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
		const struct compat_iovec __user *,vec,
		compat_ulong_t, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret;
	loff_t pos;

	if (!f.file)
		return -EBADF;
	pos = f.file->f_pos;
	ret = compat_readv(f.file, vec, vlen, &pos);
	if (ret >= 0)
		f.file->f_pos = pos;
	fdput_pos(f);
	return ret;
}

static long __compat_sys_preadv64(unsigned long fd,
				  const struct compat_iovec __user *vec,
				  unsigned long vlen, loff_t pos)
{
	struct fd f;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	f = fdget(fd);
	if (!f.file)
		return -EBADF;
	ret = -ESPIPE;
	if (f.file->f_mode & FMODE_PREAD)
		ret = compat_readv(f.file, vec, vlen, &pos);
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
		const struct compat_iovec __user *,vec,
		unsigned long, vlen, loff_t, pos)
{
	return __compat_sys_preadv64(fd, vec, vlen, pos);
}
#endif

COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
		const struct compat_iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return __compat_sys_preadv64(fd, vec, vlen, pos);
}

static ssize_t compat_writev(struct file *file,
			     const struct compat_iovec __user *vec,
			     unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_WRITE))
		goto out;

	ret = -EINVAL;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		goto out;

	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
		const struct compat_iovec __user *, vec,
		compat_ulong_t, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret;
	loff_t pos;

	if (!f.file)
		return -EBADF;
	pos = f.file->f_pos;
	ret = compat_writev(f.file, vec, vlen, &pos);
	if (ret >= 0)
		f.file->f_pos = pos;
	fdput_pos(f);
	return ret;
}

static long __compat_sys_pwritev64(unsigned long fd,
				   const struct compat_iovec __user *vec,
				   unsigned long vlen, loff_t pos)
{
	struct fd f;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	f = fdget(fd);
	if (!f.file)
		return -EBADF;
	ret = -ESPIPE;
	if (f.file->f_mode & FMODE_PWRITE)
		ret = compat_writev(f.file, vec, vlen, &pos);
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
		const struct compat_iovec __user *,vec,
		unsigned long, vlen, loff_t, pos)
{
	return __compat_sys_pwritev64(fd, vec, vlen, pos);
}
#endif

COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
		const struct compat_iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return __compat_sys_pwritev64(fd, vec, vlen, pos);
}

#endif

static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	loff_t out_pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!in.file)
		goto out;
	if (!(in.file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos) {
		pos = in.file->f_pos;
	} else {
		pos = *ppos;
		if (!(in.file->f_mode & FMODE_PREAD))
			goto fput_in;
	}
	retval = rw_verify_area(READ, in.file, &pos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!out.file)
		goto fput_in;
	if (!(out.file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = file_inode(in.file);
	out_inode = file_inode(out.file);
	out_pos = out.file->f_pos;
	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in.file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	file_start_write(out.file);
	retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
	file_end_write(out.file);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(in.file);
		fsnotify_modify(out.file);
		out.file->f_pos = out_pos;
		if (ppos)
			*ppos = pos;
		else
			in.file->f_pos = pos;
	}

	inc_syscr(current);
	inc_syscw(current);
	if (pos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}
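
/*
 * Note on do_sendfile() above: with ppos == NULL the input file's own
 * f_pos is used and advanced (no FMODE_PREAD needed); with an explicit
 * offset the caller's value is advanced instead and in.file->f_pos is
 * left alone.  The sendfile wrappers below map this onto the classic
 * userspace interface (sketch, not kernel code):
 *
 *	off_t off = 0;
 *	sendfile(sock_fd, file_fd, &off, count);	advances "off" only
 *	sendfile(sock_fd, file_fd, NULL, count);	advances file_fd's offset
 */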

SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
		compat_off_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
		compat_loff_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#endif