// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
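/*
 * DAX read path. With IOCB_NOWAIT the shared inode lock may only be
 * taken opportunistically: if it is contended, return -EAGAIN instead
 * of blocking the caller.
 */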
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!inode_trylock_shared(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock_shared(inode);
        }
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fallback to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
            (atomic_read(&inode->i_writecount) == 1) &&
            !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}
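
/*
 * Wait until all in-flight conversions of unwritten extents on this
 * inode have completed, i.e. i_unwritten has dropped to zero.
 */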
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}
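
/*
 * Illustrative example (assuming a 4096-byte block size, so blockmask
 * is 0x0fff): a 4096-byte write at pos 8192 from a page-aligned buffer
 * is aligned, since neither pos nor iov_iter_alignment() contributes
 * any of the low twelve bits. A 512-byte write at the same pos is
 * unaligned, because iov_iter_alignment() ORs in the 512-byte segment
 * length and 512 & 0x0fff != 0, so that AIO must be serialized.
 */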

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}
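
/*
 * Common write checks: run the generic VFS checks, then enforce the
 * smaller size limit of bitmap-mapped (non-extent) files by truncating
 * the iov_iter at s_bitmap_maxbytes. As a rough illustration, with
 * 4 KiB blocks the indirect-block scheme can address
 * 12 + 1024 + 1024^2 + 1024^3 blocks, about 4 TiB, well below the
 * extent-mapped limit recorded in s_maxbytes.
 */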
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif
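
/*
 * Main write entry point. Direct IO gets two special cases below:
 * unaligned AIO is serialized by draining in-flight unwritten-extent
 * conversions first, and a pure overwrite of already-written blocks
 * may take the dioread_nolock overwrite path.
 */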
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
                return -EOPNOTSUPP;

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO must be serialized among each other as zeroing
         * of partial blocks of two competing unaligned AIOs can result in data
         * corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }
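
        /*
         * The direct IO submission path (ext4_direct_IO_write()) reads
         * this flag back through iocb->private; when it is set, the
         * overwrite can proceed without exclusive inode locking for
         * the duration of the IO.
         */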
        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
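/*
 * DAX page fault handler. Write faults may need to allocate blocks, so
 * they run under a journal handle and are retried when the allocation
 * fails with ENOSPC; read faults only take i_mmap_sem to protect
 * against truncate and hole punching.
 */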
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                     (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
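/* Without CONFIG_FS_DAX a DAX mapping can never be set up, so alias the
 * DAX vm_ops to the regular ones to keep ext4_file_mmap() building. */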
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files. At least
         * until someone comes up with a sensible use case.
         */
        if (!IS_DAX(file_inode(file)) && (vma->vm_flags & VM_SYNC))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}
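
/*
 * Illustrative userspace sketch (assumes a DAX-capable device mounted
 * with -o dax; the path is hypothetical):
 *
 *      int fd = open("/mnt/pmem/file", O_RDWR);
 *      void *p = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * MAP_SYNC is only accepted together with MAP_SHARED_VALIDATE. On a
 * non-DAX ext4 file the call fails with EOPNOTSUPP, matching the
 * VM_SYNC check in ext4_file_mmap() above.
 */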

static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}
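
/*
 * Illustrative userspace sketch of the SEEK_HOLE/SEEK_DATA interface
 * served above (standard lseek(2), nothing ext4-specific): walk the
 * allocated regions of a sparse file.
 *
 *      off_t data = lseek(fd, 0, SEEK_DATA);
 *      while (data >= 0) {
 *              off_t hole = lseek(fd, data, SEEK_HOLE);
 *              printf("data: %lld..%lld\n",
 *                     (long long)data, (long long)hole - 1);
 *              data = lseek(fd, hole, SEEK_DATA);
 *      }
 *
 * lseek() returns -1 with errno set to ENXIO once the offset reaches
 * EOF, which ends the loop.
 */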

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};