/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
            (atomic_read(&inode->i_writecount) == 1) &&
            !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

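/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed (i_unwritten reaches zero).
 */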
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

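/*
 * Main write path for ext4 regular files: serializes unaligned/O_APPEND
 * direct AIO, enforces the smaller size limit of bitmap-mapped files,
 * detects direct-IO overwrites of already-allocated blocks, and then hands
 * off to __generic_file_write_iter().
 */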
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct mutex *aio_mutex = NULL;
        struct blk_plug plug;
        int o_direct = io_is_direct(file);
        int overwrite = 0;
        size_t length = iov_iter_count(from);
        ssize_t ret;
        loff_t pos = iocb->ki_pos;

        /*
         * Unaligned direct AIO must be serialized; see the comment above.
         * In the case of O_APPEND, assume that we must always serialize.
         */
        if (o_direct &&
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            (file->f_flags & O_APPEND ||
             ext4_unaligned_aio(inode, from, pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }

        mutex_lock(&inode->i_mutex);
        if (file->f_flags & O_APPEND)
                iocb->ki_pos = pos = i_size_read(inode);

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if ((pos > sbi->s_bitmap_maxbytes) ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
                        mutex_unlock(&inode->i_mutex);
                        ret = -EFBIG;
                        goto errout;
                }

                if (pos + length > sbi->s_bitmap_maxbytes)
                        iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
        }

        iocb->private = &overwrite;
        if (o_direct) {
                blk_start_plug(&plug);

                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                        struct ext4_map_blocks map;
                        unsigned int blkbits = inode->i_blkbits;
                        int err, len;

                        map.m_lblk = pos >> blkbits;
                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                                    - map.m_lblk;
                        len = map.m_len;

                        err = ext4_map_blocks(NULL, inode, &map, 0);
                        /*
                         * 'err == len' means that all of the blocks have been
                         * preallocated, whether or not they are initialized.
                         * To exclude unwritten extents we also need to check
                         * m_flags.  There are two cases that indicate an
                         * initialized extent: 1) if we hit the extent cache,
                         * the EXT4_MAP_MAPPED flag is returned; 2) if we do a
                         * real lookup, no flags are returned.  So we check
                         * both conditions.
                         */
                        if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                                overwrite = 1;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
        if (o_direct)
                blk_finish_plug(&plug);

errout:
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
}

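/*
 * DAX fault handlers: on DAX-capable inodes, page faults are served
 * directly from storage via dax_fault()/dax_mkwrite() rather than through
 * the page cache.
 */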
#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_fault(vma, vmf, ext4_get_block);
                                        /* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .page_mkwrite   = ext4_dax_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

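/*
 * mmap() entry point: DAX inodes get ext4_dax_vm_ops and are marked
 * VM_MIXEDMAP; everything else uses the page-cache based ext4_file_vm_ops.
 */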
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

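/*
 * Called on every open (unlike ->release).  On the first open of a
 * read-write mounted filesystem the mount point is recorded in the
 * superblock; write-capable opens also attach the jbd2 inode, and every
 * open goes through dquot_file_open() for quota accounting.
 */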
static int ext4_file_open(struct inode * inode, struct file * filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                int ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because it lets us handle
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  Once the extent status tree is fully implemented, it will track
 * all extent state for a file and we can use it directly to retrieve the
 * offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we have to look up
 * the page cache to check whether there is data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we treat the extent as data or as a hole according to whether the page
 * cache has data or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first iteration and the offset is
                         * not beyond the end offset, there is a hole at
                         * this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first iteration and the offset is smaller
                 * than the first page offset, there is a hole at this
                 * offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is not beyond the end of the
                         * given range, it is a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (page_has_buffers(page)) {
                                lastoff = page_offset(page);
                                bh = head = page_buffers(page);
                                do {
                                        if (buffer_uptodate(bh) ||
                                            buffer_unwritten(bh)) {
                                                if (whence == SEEK_DATA)
                                                        found = 1;
                                        } else {
                                                if (whence == SEEK_HOLE)
                                                        found = 1;
                                        }
                                        if (found) {
                                                *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                                unlock_page(page);
                                                goto out;
                                        }
                                        lastoff += bh->b_size;
                                        bh = bh->b_this_page;
                                } while (bh != head);
                        }

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * If the number of pages found is less than we asked for,
                 * there is a hole in that range.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
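/*
 * Walk the file forward block by block: a written mapping or a delayed
 * extent is reported as data immediately; an unwritten extent is reported
 * as data only if the page cache holds data for it (see
 * ext4_find_unwritten_pgoff() above).
 */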
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset, it is
                 * treated as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = (loff_t)last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
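/*
 * Walk the file forward, skipping written mappings, delayed extents, and
 * unwritten extents that are backed by page-cache data; the first offset
 * not covered by any of these is returned as the hole.
 */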
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset, we skip
                 * this extent.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

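/* File operations for regular ext4 files when DAX is not in use. */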
const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = new_sync_read,
        .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

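/*
 * DAX variant of the table above: identical except that the splice hooks
 * are omitted, since splice is not yet supported with DAX.
 */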
#ifdef CONFIG_FS_DAX
const struct file_operations ext4_dax_file_operations = {
        .llseek         = ext4_llseek,
        .read           = new_sync_read,
        .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        /* Splice not yet supported with DAX */
        .fallocate      = ext4_fallocate,
};
#endif

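/* Inode operations for ext4 regular files. */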
const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};