/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

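/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed (i_unwritten drops to zero) before letting a serialized
 * unaligned AIO proceed.
 */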
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

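/*
 * Write path for regular files: serialize unaligned direct AIO where needed,
 * apply the generic write checks and the bitmap-file size limit, detect
 * direct-IO overwrites of already-initialized blocks, and then hand the
 * iterator to __generic_file_write_iter() under i_mutex.
 */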
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int overwrite = 0;
	ssize_t ret;

	/*
	 * Unaligned direct AIO must be serialized; see comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (iocb->ki_flags & IOCB_APPEND ||
	     ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized.  To exclude unwritten extents we
			 * also need to check m_flags.  Two conditions
			 * indicate an initialized extent: 1) if we hit the
			 * extent cache, the EXT4_MAP_MAPPED flag is
			 * returned; 2) if we do a real lookup,
			 * non-unwritten flags are returned.  So we should
			 * check both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;

out:
	mutex_unlock(&inode->i_mutex);
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

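/*
 * DAX fault handlers: with CONFIG_FS_DAX, faults on a DAX-capable inode map
 * storage directly into the address space instead of going through the page
 * cache.  Without DAX support, ext4_dax_vm_ops simply aliases the regular
 * ext4_file_vm_ops below.
 */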
#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, ext4_get_block);
					/* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

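/*
 * mmap() for regular files: set up the encryption key if the inode is
 * encrypted, then select the DAX fault handlers (marking the VMA
 * VM_MIXEDMAP) for DAX inodes, or the regular page-cache backed handlers
 * otherwise.
 */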
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = ext4_generate_encryption_key(inode);
		if (err)
			return 0;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

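/*
 * Called on every open of a regular file.  On the first writable open after
 * mount, record the mount point in the superblock's s_last_mounted field;
 * for writable opens, attach the jbd2_inode; and for encrypted inodes, make
 * sure the encryption key is available.
 */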
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	ret = dquot_file_open(inode, filp);
	if (!ret && ext4_encrypted_inode(inode)) {
		ret = ext4_generate_encryption_key(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because it lets us handle
 * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
 * same function.  Once the extent status tree has been fully implemented,
 * it will track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether or not there is data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we decide whether the extent is data or a hole according to whether the
 * page cache has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first iteration of the loop and the
			 * offset is not beyond the end offset, there is a
			 * hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first iteration of the loop and the offset
		 * is smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * We got fewer pages than we asked for, so there must be a
		 * hole in there.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * it is treated as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset,
		 * it is treated as data or a hole according to whether
		 * the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * we will skip this extent.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset,
		 * it is treated as data or a hole according to whether
		 * the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};