read_write.c

/*
 * linux/fs/read_write.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/aio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include "internal.h"

#include <asm/uaccess.h>
#include <asm/unistd.h>

typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
		unsigned long, loff_t);
typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/**
 * vfs_setpos - update the file offset for lseek
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	return offset;
}
EXPORT_SYMBOL(vfs_setpos);

/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @maxsize: max size of this file in file system
 * @eof: offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, for e.g. hashed directories
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
		loff_t maxsize, loff_t eof)
{
	switch (whence) {
	case SEEK_END:
		offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation. Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0)
			return file->f_pos;
		/*
		 * f_lock protects against read/modify/write race with other
		 * SEEK_CURs. Note that parallel writes and reads behave
		 * like SEEK_SET.
		 */
		spin_lock(&file->f_lock);
		offset = vfs_setpos(file, file->f_pos + offset, maxsize);
		spin_unlock(&file->f_lock);
		return offset;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if (offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if (offset >= eof)
			return -ENXIO;
		offset = eof;
		break;
	}

	return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);

/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems. It just updates the file offset to the value specified by
 * @offset and @whence.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);

/**
 * fixed_size_llseek - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @size: size of the file
 *
 */
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR: case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						size, size);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(fixed_size_llseek);
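
/*
 * Illustrative sketch (not part of the original file): a driver that exposes
 * a fixed-size region, for example a hypothetical 4 KiB EEPROM character
 * device, would typically point its ->llseek at a small wrapper like the one
 * below so that SEEK_SET/SEEK_CUR/SEEK_END are all clamped to the device
 * size. The names eeprom_llseek, eeprom_fops and EEPROM_SIZE are made up.
 */
#if 0
#define EEPROM_SIZE	4096

static loff_t eeprom_llseek(struct file *file, loff_t offset, int whence)
{
	/* Both the max size and the SEEK_END anchor are the device size. */
	return fixed_size_llseek(file, offset, whence, EEPROM_SIZE);
}

static const struct file_operations eeprom_fops = {
	.llseek	= eeprom_llseek,
	/* .read/.write omitted in this sketch */
};
#endif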

/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is an implementation of ->llseek usable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);

loff_t no_llseek(struct file *file, loff_t offset, int whence)
{
	return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);
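
/*
 * Illustrative sketch (not from the original source): for a stream-like
 * device where seeking is meaningless, a driver would normally plug
 * no_llseek into its file_operations so that lseek() fails with -ESPIPE,
 * or noop_llseek if its userspace is known to expect lseek() to "succeed"
 * without doing anything. The fops name below is hypothetical.
 */
#if 0
static const struct file_operations mystream_fops = {
	.llseek	= no_llseek,	/* or noop_llseek for legacy userspace */
};
#endif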

loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
		case SEEK_END:
			offset += i_size_read(inode);
			break;
		case SEEK_CUR:
			if (offset == 0) {
				retval = file->f_pos;
				goto out;
			}
			offset += file->f_pos;
			break;
		case SEEK_DATA:
			/*
			 * In the generic case the entire file is data, so as
			 * long as offset isn't at the end of the file then the
			 * offset is data.
			 */
			if (offset >= inode->i_size) {
				retval = -ENXIO;
				goto out;
			}
			break;
		case SEEK_HOLE:
			/*
			 * There is a virtual hole at the end of the file, so
			 * as long as offset isn't i_size or larger, return
			 * i_size.
			 */
			if (offset >= inode->i_size) {
				retval = -ENXIO;
				goto out;
			}
			offset = inode->i_size;
			break;
	}
	retval = -EINVAL;
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
EXPORT_SYMBOL(default_llseek);

loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t (*fn)(struct file *, loff_t, int);

	fn = no_llseek;
	if (file->f_mode & FMODE_LSEEK) {
		if (file->f_op->llseek)
			fn = file->f_op->llseek;
	}
	return fn(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);

static inline struct fd fdget_pos(int fd)
{
	return __to_fd(__fdget_pos(fd));
}

static inline void fdput_pos(struct fd f)
{
	if (f.flags & FDPUT_POS_UNLOCK)
		mutex_unlock(&f.file->f_pos_lock);
	fdput(f);
}

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	off_t retval;
	struct fd f = fdget_pos(fd);

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(f.file, offset, whence);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fdput_pos(f);
	return retval;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return sys_lseek(fd, offset, whence);
}
#endif

#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	struct fd f = fdget_pos(fd);
	loff_t offset;

	if (!f.file)
		return -EBADF;

	retval = -EINVAL;
	if (whence > SEEK_MAX)
		goto out_putf;

	offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
			whence);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fdput_pos(f);
	return retval;
}
#endif

/*
 * rw_verify_area doesn't like huge counts. We limit
 * them to something that fits in "int" so that others
 * won't have to do range checks all the time.
 */
int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
	struct inode *inode;
	loff_t pos;
	int retval = -EINVAL;

	inode = file_inode(file);
	if (unlikely((ssize_t) count < 0))
		return retval;
	pos = *ppos;
	if (unlikely(pos < 0)) {
		if (!unsigned_offsets(file))
			return retval;
		if (count >= -pos) /* both values are in 0..LLONG_MAX */
			return -EOVERFLOW;
	} else if (unlikely((loff_t) (pos + count) < 0)) {
		if (!unsigned_offsets(file))
			return retval;
	}

	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
		retval = locks_mandatory_area(
			read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
			inode, file, pos, count);
		if (retval < 0)
			return retval;
	}
	retval = security_file_permission(file,
				read_write == READ ? MAY_READ : MAY_WRITE);
	if (retval)
		return retval;
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}

ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_read);

ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;
	iov_iter_init(&iter, READ, &iov, 1, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_read);

ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
		   loff_t *pos)
{
	ssize_t ret;

	if (file->f_op->read)
		ret = file->f_op->read(file, buf, count, pos);
	else if (file->f_op->aio_read)
		ret = do_sync_read(file, buf, count, pos);
	else if (file->f_op->read_iter)
		ret = new_sync_read(file, buf, count, pos);
	else
		ret = -EINVAL;

	return ret;
}

ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret >= 0) {
		count = ret;
		ret = __vfs_read(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_access(file);
			add_rchar(current, ret);
		}
		inc_syscr(current);
	}

	return ret;
}
EXPORT_SYMBOL(vfs_read);

ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_write);

ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else if (file->f_op->aio_write)
		ret = do_sync_write(file, p, count, pos);
	else
		ret = new_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
EXPORT_SYMBOL(__kernel_write);
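
/*
 * Illustrative sketch (not part of the original file): __kernel_write() is
 * meant for in-kernel callers that need to write a *kernel* buffer to a
 * struct file. A minimal, hypothetical helper built on it might look like
 * this; the function name and message are made up for the example.
 */
#if 0
static int example_log_to_file(struct file *file, loff_t *pos)
{
	static const char msg[] = "hello from the kernel\n";
	ssize_t ret;

	/*
	 * msg lives in kernel memory, so vfs_write()'s access_ok() check
	 * would reject it; __kernel_write() temporarily switches the
	 * address limit with set_fs(get_ds()) to allow it.
	 */
	ret = __kernel_write(file, msg, sizeof(msg) - 1, pos);
	return ret < 0 ? ret : 0;
}
#endif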

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		file_start_write(file);
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else if (file->f_op->aio_write)
			ret = do_sync_write(file, buf, count, pos);
		else
			ret = new_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
EXPORT_SYMBOL(vfs_write);

static inline loff_t file_pos_read(struct file *file)
{
	return file->f_pos;
}

static inline void file_pos_write(struct file *file, loff_t pos)
{
	file->f_pos = pos;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_read(f.file, buf, count, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}
	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_write(f.file, buf, count, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
		size_t, count, loff_t, pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_read(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
		size_t, count, loff_t, pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_write(f.file, buf, count, &pos);
		fdput(f);
	}

	return ret;
}

/*
 * Reduce an iovec's length in-place. Return the resulting number of segments
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	unsigned long seg = 0;
	size_t len = 0;

	while (seg < nr_segs) {
		seg++;
		if (len + iov->iov_len >= to) {
			iov->iov_len = to - len;
			break;
		}
		len += iov->iov_len;
		iov++;
	}
	return seg;
}
EXPORT_SYMBOL(iov_shorten);
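
/*
 * Illustrative sketch (not from the original source): iov_shorten() trims a
 * caller-owned iovec array so that it describes at most 'to' bytes, and
 * returns how many segments remain in use. A hypothetical caller capping a
 * request at a per-device byte limit might use it like this:
 */
#if 0
static unsigned long example_cap_iov(struct iovec *iov, unsigned long nr_segs)
{
	const size_t limit = 64 * 1024;	/* made-up per-request byte limit */
	unsigned long used;

	/*
	 * After this call only the first 'used' segments matter, and their
	 * total length is at most 'limit'; the last used segment may have
	 * been truncated in place.
	 */
	used = iov_shorten(iov, nr_segs, limit);

	return used;
}
#endif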

static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	iov_iter_init(&iter, rw, iov, nr_segs, len);
	ret = fn(&kiocb, &iter);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_nbytes = len;

	ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
{
	struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		nr = fn(filp, base, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}

/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)

ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_pointer,
			      struct iovec **ret_pointer)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto out;
		}
		if (type >= 0
		    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}
		if (len > MAX_RW_COUNT - ret) {
			len = MAX_RW_COUNT - ret;
			iov[seg].iov_len = len;
		}
		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}

static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	ret = rw_copy_check_uvector(type, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
					   pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
					   pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
		  unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	return do_readv_writev(READ, file, vec, vlen, pos);
}
EXPORT_SYMBOL(vfs_readv);

ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
		   unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	return do_readv_writev(WRITE, file, vec, vlen, pos);
}
EXPORT_SYMBOL(vfs_writev);

SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_readv(f.file, vec, vlen, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_writev(f.file, vec, vlen, &pos);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
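
/*
 * Added clarification (not in the original file): the shift below is split
 * into two HALF_LONG_BITS steps because shifting a value by the full width
 * of its type is undefined behaviour in C. On 64-bit kernels HALF_LONG_BITS
 * is 32, so the two shifts discard 'high' entirely and 'low' alone is taken
 * as the full 64-bit position; on 32-bit kernels the two 16-bit shifts
 * combine 'high' and 'low' into one 64-bit position.
 */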
static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}

SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PREAD)
			ret = vfs_readv(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

#ifdef CONFIG_COMPAT

static ssize_t compat_do_readv_writev(int type, struct file *file,
			       const struct compat_iovec __user *uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	compat_ssize_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
					   UIO_FASTIOV, iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
					   pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
					   pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}

static ssize_t compat_readv(struct file *file,
			    const struct compat_iovec __user *vec,
			    unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_READ))
		goto out;

	ret = -EINVAL;
	if (!(file->f_mode & FMODE_CAN_READ))
		goto out;

	ret = compat_do_readv_writev(READ, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
		const struct compat_iovec __user *, vec,
		compat_ulong_t, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret;
	loff_t pos;

	if (!f.file)
		return -EBADF;
	pos = f.file->f_pos;
	ret = compat_readv(f.file, vec, vlen, &pos);
	if (ret >= 0)
		f.file->f_pos = pos;
	fdput_pos(f);
	return ret;
}

static long __compat_sys_preadv64(unsigned long fd,
				  const struct compat_iovec __user *vec,
				  unsigned long vlen, loff_t pos)
{
	struct fd f;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	f = fdget(fd);
	if (!f.file)
		return -EBADF;
	ret = -ESPIPE;
	if (f.file->f_mode & FMODE_PREAD)
		ret = compat_readv(f.file, vec, vlen, &pos);
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
		const struct compat_iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return __compat_sys_preadv64(fd, vec, vlen, pos);
}
#endif

COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
		const struct compat_iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return __compat_sys_preadv64(fd, vec, vlen, pos);
}

static ssize_t compat_writev(struct file *file,
			     const struct compat_iovec __user *vec,
			     unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_WRITE))
		goto out;

	ret = -EINVAL;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		goto out;

	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
		const struct compat_iovec __user *, vec,
		compat_ulong_t, vlen)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret;
	loff_t pos;

	if (!f.file)
		return -EBADF;
	pos = f.file->f_pos;
	ret = compat_writev(f.file, vec, vlen, &pos);
	if (ret >= 0)
		f.file->f_pos = pos;
	fdput_pos(f);
	return ret;
}

static long __compat_sys_pwritev64(unsigned long fd,
				   const struct compat_iovec __user *vec,
				   unsigned long vlen, loff_t pos)
{
	struct fd f;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	f = fdget(fd);
	if (!f.file)
		return -EBADF;
	ret = -ESPIPE;
	if (f.file->f_mode & FMODE_PWRITE)
		ret = compat_writev(f.file, vec, vlen, &pos);
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
		const struct compat_iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return __compat_sys_pwritev64(fd, vec, vlen, pos);
}
#endif

COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
		const struct compat_iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return __compat_sys_pwritev64(fd, vec, vlen, pos);
}
#endif

static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	loff_t out_pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!in.file)
		goto out;
	if (!(in.file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos) {
		pos = in.file->f_pos;
	} else {
		pos = *ppos;
		if (!(in.file->f_mode & FMODE_PREAD))
			goto fput_in;
	}
	retval = rw_verify_area(READ, in.file, &pos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!out.file)
		goto fput_in;
	if (!(out.file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = file_inode(in.file);
	out_inode = file_inode(out.file);
	out_pos = out.file->f_pos;
	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in.file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	file_start_write(out.file);
	retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
	file_end_write(out.file);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(in.file);
		fsnotify_modify(out.file);
		out.file->f_pos = out_pos;
		if (ppos)
			*ppos = pos;
		else
			in.file->f_pos = pos;
	}

	inc_syscr(current);
	inc_syscw(current);
	if (pos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}

SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
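
/*
 * Usage sketch added for clarity (not part of the original file): from
 * userspace, the syscall above is normally reached through the sendfile(2)
 * wrapper. Passing a non-NULL offset reads from that position and writes
 * the updated position back without touching in_fd's own file offset;
 * passing NULL uses and advances in_fd's offset instead:
 *
 *	#include <sys/sendfile.h>
 *
 *	off_t off = 0;
 *	ssize_t n = sendfile(out_fd, in_fd, &off, count);
 *
 * out_fd and in_fd are assumed to be already-open descriptors.
 */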

SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
		compat_off_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
		compat_loff_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#endif