xfs_file.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;
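
/*
 * Set or clear the preallocation flag (XFS_DIFLAG_PREALLOC) on an inode and
 * log the change. Unless XFS_PREALLOC_INVISIBLE is given this counts as a
 * file-modifying operation: suid/sgid bits are stripped and the timestamps
 * are bumped.
 */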
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
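
/*
 * Regular file fsync: write back and wait on dirty pagecache data, flush
 * the relevant device write caches, then force the log up to the last LSN
 * that modified this inode so the logged metadata changes are stable too.
 */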
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first. This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
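
/*
 * Direct reads only need the IOLOCK held shared; the heavy lifting of
 * mapping and bio submission is done by iomap_dio_rw().
 */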
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}
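
/*
 * Top-level ->read_iter: pick the DAX, direct or buffered read path based
 * on the inode and the iocb flags.
 */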
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * held exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write. If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all the checks above.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above. Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root. This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}
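
/*
 * Completion handler for direct writes, passed to iomap_dio_rw() from
 * xfs_file_dio_aio_write(): it finishes copy-on-write remapping, converts
 * unwritten extents, and pushes the in-core and on-disk file size out for
 * extending writes.
 */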
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size. We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/* If we are going to wait for other DIO to finish, bail */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count)) {
				/* bail via out: so the iolock is released */
				ret = -EAGAIN;
				goto out;
			}
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
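
/*
 * DAX writes copy data synchronously through dax_iomap_rw(), so there is no
 * completion callback; any file size extension is applied inline once the
 * copy has succeeded.
 */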
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
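
/*
 * Buffered writes always take the IOLOCK exclusive. On EDQUOT/ENOSPC the
 * write is retried after attempting to free quota reservations, speculative
 * post-EOF preallocations and CoW staging extents.
 */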
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
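
/*
 * Top-level ->write_iter: dispatch to the DAX, direct or buffered write
 * path. A direct write may fall back to the buffered path, but only for
 * the reflink CoW case handled below.
 */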
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW. In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}
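
/*
 * Wait for any page in this DAX mapping to lose its extra references
 * (e.g. from get_user_pages()) before the underlying blocks can be reused.
 * xfs_wait_dax_page() cycles the MMAPLOCK around each wait so that page
 * faults and other lock waiters can make progress in the meantime.
 */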
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}
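
/*
 * Break any outstanding layouts (pNFS layout leases and, for BREAK_UNMAP,
 * busy DAX pages) before an operation that changes the file's block map.
 * The iolock may be dropped and reacquired while waiting, so callers must
 * revalidate anything they sampled under the lock once this returns.
 */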
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
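
/*
 * Implements the fallocate(2) modes named in XFS_FALLOC_FL_SUPPORTED,
 * serialising against other I/O and page faults by holding both the IOLOCK
 * and the MMAPLOCK exclusively and breaking layouts first.
 *
 * As a rough usage sketch, userspace punches a hole (which the VFS requires
 * to be combined with FALLOC_FL_KEEP_SIZE) with something like:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 */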
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;
		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				       len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	struct inode	*srci = file_inode(src_file);
	u64		max_dedupe;
	int		error;

	/*
	 * Since we have to read all these pages in to compare them, cut
	 * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
	 * That means we won't do more than MAX_RW_COUNT IO per request.
	 */
	max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
	if (len > max_dedupe)
		len = max_dedupe;
	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
					len, true);
	if (error)
		return error;
	return len;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem. With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size. For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}
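
/*
 * SEEK_HOLE and SEEK_DATA are answered from the extent map via the iomap
 * seek helpers; all other whence values go through generic_file_llseek().
 */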
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode	*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};