xfs_file.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over holes,
 * though the callers usually map the holes first and avoid them. If a block is
 * not completely zeroed, then it will be read from disk before being partially
 * zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return status;
}

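/*
 * Set or clear the preallocation flag on the inode in a logged transaction.
 * Unless the caller asks for an invisible update, this also strips the
 * SUID/SGID bits and bumps the timestamps, just as an ordinary write would.
 */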
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		ip->i_d.di_mode &= ~S_ISUID;
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

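/*
 * Regular file fsync: write back dirty data in the given range, flush the
 * volatile write cache of the data device where required, then force the
 * log up to the last LSN that dirtied the inode.
 */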
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data make
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

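/*
 * Read path for both buffered and direct IO.  Direct IO takes the iolock
 * exclusively only for as long as it takes to flush and invalidate the page
 * cache; the read itself runs with the lock held shared.
 */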
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(mp, xs_read_calls);

	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take lock exclusively
	 * if the page cache needs invalidation. This allows the normal direct
	 * IO case of no page cache pages to proceed concurrently without
	 * serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

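/*
 * splice read: same statistics accounting and shared iolock as the normal
 * read path, but DAX inodes must bypass the page cache.
 */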
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	/* for dax, we need to avoid the page cache */
	if (IS_DAX(VFS_I(ip)))
		ret = default_file_splice_read(infilp, ppos, pipe, count, flags);
	else
		ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize,
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	*did_zeroing = true;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
				       &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		*did_zeroing = true;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, true);
	if (error)
		return error;

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			pos = iocb->ki_pos;
	loff_t			end;
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);
	pos = iocb->ki_pos;
	end = pos + count - 1;

	/*
	 * See xfs_file_read_iter() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);

	data = *from;
	ret = mapping->a_ops->direct_IO(iocb, &data, pos);

	/* see generic_file_direct_write() for why this is necessary */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT,
					      end >> PAGE_CACHE_SHIFT);
	}

	if (ret > 0) {
		pos += ret;
		iov_iter_advance(from, ret);
		iocb->ki_pos = pos;
	}
out:
	xfs_rw_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS. DAX can result in
	 * partial writes, but direct IO will either complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
	return ret;
}

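/*
 * Buffered writes always take the iolock exclusively.  On EDQUOT or ENOSPC
 * we try to free lingering preallocated EOF blocks and retry the write once
 * before giving up.
 */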
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, iov_iter_count(from),
				      iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, iocb->ki_pos);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

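/*
 * Top level ->write_iter method: dispatch to the direct IO path for
 * O_DIRECT and DAX inodes, otherwise fall through to the buffered path,
 * then handle O_SYNC/O_DSYNC semantics for whatever was written.
 */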
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		ssize_t		err;

		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE)

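/*
 * fallocate() dispatcher: punch, collapse, insert and zero ranges are handed
 * to the respective xfs_*_file_space helpers; everything else is a plain
 * preallocation.  All variants run with both the iolock and the mmap lock
 * held exclusively.
 */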
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;
		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = 1;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

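/*
 * Reject opens of large files when O_LARGEFILE is not set, and any open on
 * a filesystem that has been forcibly shut down.
 */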
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

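/*
 * ->release: let xfs_release() trim speculative preallocation beyond EOF
 * where appropriate.
 */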
STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * holes and this is the first time through the loop, the
		 * given offset landed in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers to
		 * find holes but they all contained data, then the last
		 * offset was updated to point past the end of the last
		 * mapped page.  If it has not reached the end of the search
		 * range, there must be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least one page was found.  If this is the first time
		 * through the loop and the offset of the first page is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of
			 * range.  If the current offset has not reached the
			 * end of the specified search range, there should be
			 * a hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping ==
			 * NULL).  We can freely skip it and proceed to check
			 * the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * of the search range if this is the first
				 * time through the loop.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * Either we were searching for data and found
			 * nothing, or we were searching for a hole and found
			 * a data buffer.  In either case, the next page
			 * probably contains what we want, so update the last
			 * offset to point to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  Nothing was found when searching for data, but
		 * for a hole search there is a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

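/*
 * SEEK_HOLE/SEEK_DATA implementation: walk the extent map from the start
 * offset two extents at a time, probing the page cache for unwritten
 * extents, until the desired hole or data offset is found or we run off
 * the end of the file.
 */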
STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

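/*
 * ->llseek: the generic code handles SEEK_SET/CUR/END, while SEEK_HOLE and
 * SEEK_DATA are mapped onto the extent walker above.
 */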
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * mmap()d file has taken write protection fault and is being made writable. We
 * can set the page state up correctly for a writable page, which means we can
 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
 * mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
	} else {
		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

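/*
 * Read faults (and, for DAX, write faults redirected to page_mkwrite) are
 * serialised against truncate by taking XFS_MMAPLOCK_SHARED around the
 * generic or DAX fault handler.
 */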
STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		/*
		 * we do not want to trigger unwritten extent conversion on read
		 * faults - that is unnecessary overhead and would also require
		 * changes to xfs_get_blocks_direct() to map unwritten extent
		 * ioend for conversion on read-only mappings.
		 */
		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
	} else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. Hence we need to handle both cases. There is no
 * ->pmd_mkwrite callout for huge pages, so we have a single function here to
 * handle both cases.  @flags carries the information on the type of fault
 * occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
			      NULL);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it needs to serialise against
 * truncate similar to page_mkwrite. Hence we open-code dax_pfn_mkwrite()
 * here and cycle the XFS_MMAPLOCK_SHARED to ensure we serialise the fault
 * barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

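/*
 * ->mmap: install the XFS vm operations; DAX mappings are mixed-map and
 * huge-page capable.
 */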
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};