/*
 *  linux/fs/ext4/file.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}
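
/*
 * Wait for all pending unwritten-extent conversions on this inode to
 * complete, i.e. for EXT4_I(inode)->i_unwritten to drop to zero.
 */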
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}
/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
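
/*
 * Write path for regular files.  Under the inode lock this serializes
 * unaligned direct AIO, clamps bitmap-mapped files to s_bitmap_maxbytes,
 * flags direct-IO overwrites of already-mapped, initialized blocks, and
 * then hands the actual work to __generic_file_write_iter().
 */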
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO requests must be serialized with respect to
	 * each other, as zeroing of partial blocks by two competing
	 * unaligned AIOs can result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;

		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the requested
			 * blocks have been preallocated, whether or not
			 * they are initialized.  To exclude unwritten
			 * extents we also need to check m_flags: the
			 * write may only be treated as an overwrite when
			 * the whole range is mapped and initialized,
			 * which is what the EXT4_MAP_MAPPED flag
			 * indicates.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	if (o_direct)
		blk_finish_plug(&plug);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
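/*
 * Fault handler for DAX mappings.  Write faults start a journal handle
 * with enough credits for the block allocation that dax_fault() may
 * trigger; i_mmap_sem is held to serialize against truncate and hole
 * punching.
 */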
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_fault(vma, vmf, ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}
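
/*
 * PMD (huge page) counterpart of ext4_dax_fault(); the journal handle is
 * sized via ext4_chunk_trans_blocks() for PMD_SIZE / PAGE_SIZE blocks.
 */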
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
			      pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_pmd_fault(vma, addr, pmd, flags,
				       ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings.  As in the ext4_dax_fault()
 * handler, we check for races against truncate.  Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before
 * we were called has finished by now; if it covered part of the file we
 * are working on, our pte will have been unmapped and the pte_same() check
 * in wp_pfn_shared() will fail.  The fault then gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};
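
/*
 * mmap entry point: encrypted inodes cannot be mapped without their key
 * (-ENOKEY); DAX inodes get ext4_dax_vm_ops and are marked
 * VM_MIXEDMAP | VM_HUGEPAGE, while everything else gets the page-cache
 * based ext4_file_vm_ops.
 */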
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}
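
/*
 * Called at every open.  On the first writable open after mount this
 * records the mount point in the superblock for sysadmin convenience;
 * it also enforces the encryption key and encryption context checks,
 * attaches the jbd2 inode for writers, and ends with the quota open hook.
 */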
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because this lets the same
 * function implement SEEK_DATA/SEEK_HOLE for both block-mapped and
 * extent-mapped files.  Once the extent status tree is fully implemented,
 * it will track all extent status for a file and we can use it directly
 * to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we still need to
 * look at the page cache to check whether there is data in the range
 * [startoff, endoff]: if that range contains an unwritten extent, the
 * extent counts as data or as a hole depending on whether the page cache
 * holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * No pages were found.  If this is the first pass
			 * through the loop, or if we have not yet passed
			 * the end offset, the current offset is in a hole.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the
		 * offset is smaller than the first page's offset, the
		 * current offset is in a hole.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of
			 * the given range, it is in a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Getting back fewer pages than we asked for means we
		 * have run off the end of the mapped pages; for SEEK_HOLE
		 * that is a hole.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it
		 * counts as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it
		 * counts as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};