/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
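/*
 * For example, after fd2 = dup(fd), closing fd does not invoke this
 * handler; it runs only once the last duplicate descriptor (and any
 * remaining mmap reference to the file) has been dropped.
 */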
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}
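/*
 * Wait until every in-flight unwritten-extent conversion on this inode
 * has completed; EXT4_I(inode)->i_unwritten counts pending conversions.
 */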
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}
/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
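/*
 * Illustrative example: with a 4 KiB block size (blockmask = 0xfff), an
 * AIO write of 1024 bytes at offset 512 within i_size has
 * (pos | alignment) & blockmask != 0 and is treated as unaligned, so it
 * must be serialized; a 4096-byte write at offset 8192 passes the check
 * and may proceed concurrently.
 */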
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int overwrite = 0;
	ssize_t ret;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (iocb->ki_flags & IOCB_APPEND ||
	     ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have been
			 * preallocated, whether or not they are initialized.
			 * To exclude unwritten extents, we also need to check
			 * m_flags.  There are two conditions that indicate an
			 * initialized extent: 1) if we hit the extent cache,
			 * the EXT4_MAP_MAPPED flag is returned; 2) if we do a
			 * real lookup, no flags are returned.  So we should
			 * check both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;

out:
	mutex_unlock(&inode->i_mutex);
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}
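/*
 * A userspace pattern that can take the DIO overwrite path above
 * (assuming the filesystem is mounted with dioread_nolock): rewriting
 * an already-written, block-aligned region of the file with O_DIRECT:
 *
 *	fd = open("file", O_RDWR | O_DIRECT);
 *	pwrite(fd, buf, 4096, 0);
 *
 * With the target blocks mapped and initialized, no page cache attached,
 * and the write within i_size, 'overwrite' is set and passed to the
 * direct IO path via iocb->private, letting it treat the write as a
 * pure overwrite that allocates nothing.
 */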
#ifdef CONFIG_FS_DAX
static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_assoc_map->host;
	/* XXX: breaks on 32-bit > 16TB. Is that even supported? */
	loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
	int err;

	if (!uptodate)
		return;
	WARN_ON(!buffer_unwritten(bh));
	err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
}
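/*
 * DAX page-fault handler.  Write faults may allocate blocks, so a
 * journal handle sized for a full data transaction is started first;
 * read faults call into the DAX code with no transaction.
 */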
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	}

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_fault(vma, vmf, ext4_get_block_dax,
						ext4_end_io_unwritten);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		sb_end_pagefault(sb);
	}

	return result;
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	}

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_pmd_fault(vma, addr, pmd, flags,
				ext4_get_block_dax, ext4_end_io_unwritten);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		sb_end_pagefault(sb);
	}

	return result;
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_mkwrite(vma, vmf, ext4_get_block_dax,
				ext4_end_io_unwritten);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#else
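/* Without CONFIG_FS_DAX, DAX mappings fall back to the regular vm_ops. */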
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif
static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = ext4_get_encryption_info(inode);
		if (err)
			return 0;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}
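/*
 * Called at every open(2).  On the first open after a read-write mount,
 * the mount point is sampled into s_last_mounted in the superblock
 * (reported by e.g. dumpe2fs as "Last mounted on:"); encryption keys
 * are then checked and, for writers, the jbd2 inode is attached.
 */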
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = ext4_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}
/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this way
 * we can implement SEEK_DATA/SEEK_HOLE for block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree fully tracks all extent status for a file, we will be able to
 * use it directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look
 * up the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, the
 * extent is reported as data or as a hole depending on whether its
 * pages hold data.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first iteration of the loop, or if
			 * the current offset is still within the requested
			 * range, there is a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first iteration of the loop and the current
		 * offset is smaller than the offset of the first page found,
		 * there is a hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range and this page lies past it, there is a
			 * hole at the current offset.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If fewer pages were found than requested, there is a hole
		 * after the last page.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
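/*
 * Illustrative example: suppose a file has a 1 MiB fallocate()d
 * (unwritten) extent and only its first page was written through the
 * page cache.  Scanning that extent, SEEK_DATA finds the uptodate
 * buffers of page 0 and reports data at the start of the extent, while
 * SEEK_HOLE reports a hole as soon as the page cache runs out, i.e. at
 * offset 4096 (assuming 4 KiB pages).
 */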
/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether its
		 * pages in the page cache hold data.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}
/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip over
		 * this extent.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether its
		 * pages in the page cache hold data.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}
/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
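/*
 * Userspace sketch of the SEEK_DATA/SEEK_HOLE interface served above,
 * e.g. for copying only the allocated regions of a sparse file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * Both return -1 with errno set to ENXIO once the offset reaches or
 * passes the end of the file.
 */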
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};