  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * fs/f2fs/inline.c
  4. * Copyright (c) 2013, Intel Corporation
  5. * Authors: Huajun Li <huajun.li@intel.com>
  6. * Haicheng Li <haicheng.li@intel.com>
  7. */
  8. #include <linux/fs.h>
  9. #include <linux/f2fs_fs.h>
  10. #include "f2fs.h"
  11. #include "node.h"
  12. bool f2fs_may_inline_data(struct inode *inode)
  13. {
  14. if (f2fs_is_atomic_file(inode))
  15. return false;
  16. if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
  17. return false;
  18. if (i_size_read(inode) > MAX_INLINE_DATA(inode))
  19. return false;
  20. if (f2fs_post_read_required(inode))
  21. return false;
  22. return true;
  23. }
  24. bool f2fs_may_inline_dentry(struct inode *inode)
  25. {
  26. if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
  27. return false;
  28. if (!S_ISDIR(inode->i_mode))
  29. return false;
  30. return true;
  31. }
/*
 * Fill @page from the inline data held in the inode page @ipage, and
 * zero the tail of @page beyond the inline area.  No-op if @page is
 * already uptodate.  Caller holds @page and @ipage.
 */
void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
{
	struct inode *inode = page->mapping->host;
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	/* inline data can only live at file page index 0 */
	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(inode, ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
	/* flush while the mapping is still live, then unmap */
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	if (!PageUptodate(page))
		SetPageUptodate(page);
}
/*
 * Zero the inline data area of @inode (stored in @ipage) from byte
 * offset @from to the end.  Truncating to 0 also clears FI_DATA_EXIST
 * since no inline payload remains.  No-op when @from is already past
 * the inline area.
 */
void f2fs_truncate_inline_inode(struct inode *inode,
					struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA(inode))
		return;

	addr = inline_data_addr(inode, ipage);

	/* don't race with an in-flight node writeback of ipage */
	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
	set_page_dirty(ipage);

	if (from == 0)
		clear_inode_flag(inode, FI_DATA_EXIST);
}
/*
 * Satisfy a read of @page from inline data.  @page arrives locked and
 * is unlocked before returning.  Returns 0 on success, -EAGAIN when
 * the inode no longer has inline data (caller falls back to the normal
 * read path), or a negative errno from node page lookup.
 */
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	/* inode may have been converted concurrently; retry via blocks */
	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	/* only page index 0 carries inline data; others are holes */
	if (page->index)
		zero_user_segment(page, 0, PAGE_SIZE);
	else
		f2fs_do_read_inline_data(page, ipage);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}
/*
 * Convert the inline data of dn->inode into a regular data block using
 * @page as the destination page cache page.  Reserves block 0, copies
 * the inline payload into @page, writes it out out-of-place, then
 * clears the inline area and FI_INLINE_DATA.  Consumes the dnode
 * (f2fs_put_dnode) on every path.  Returns 0 on success or a negative
 * errno.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.ino = dn->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
		.io_type = FS_DATA_IO,
	};
	struct node_info ni;
	int dirty, err;

	/* no inline payload: just drop the inline state */
	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
	if (err) {
		f2fs_put_dnode(dn);
		return err;
	}

	fio.version = ni.version;

	/* the just-reserved block must be NEW_ADDR; anything else is
	 * on-disk corruption of the inline inode */
	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(dn);
		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
		f2fs_msg(fio.sbi->sb, KERN_WARNING,
			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
			"run fsck to fix.",
			__func__, dn->inode->i_ino, dn->data_blkaddr);
		return -EINVAL;
	}

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	f2fs_do_read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	ClearPageError(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	f2fs_outplace_write_data(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		f2fs_remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}
/*
 * Public entry to convert @inode from inline data to a regular data
 * block.  Grabs page 0 of the inode's mapping, takes the fs-op lock,
 * re-checks the inline flag under the inode page, and delegates to
 * f2fs_convert_inline_page().  Returns 0 if already non-inline or on
 * success, negative errno otherwise.
 */
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode))
		return 0;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	/* re-check under lock_op: another task may have converted it */
	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);

	f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}
/*
 * Write back the dirty data @page into the inode's inline area instead
 * of a data block.  Returns 0 on success, -EAGAIN if the inode is no
 * longer inline (caller takes the regular writeback path), or a
 * negative errno from the dnode lookup.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	/* inline data lives only at page index 0 */
	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(inode, dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
	kunmap_atomic(src_addr);
	/* the node page now carries the data; mark it dirty and drop
	 * the data page's dirty tag so it won't be written separately */
	set_page_dirty(dn.inode_page);
	f2fs_clear_page_cache_dirty_tag(page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}
/*
 * Roll-forward recovery of inline data from the recovered node page
 * @npage.  Returns true when inline data was recovered (caller skips
 * data-block recovery), false when data blocks still need recovery.
 */
bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		/* copy the recovered inline payload into the live inode page */
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE, true);

		src_addr = inline_data_addr(inode, npage);
		dst_addr = inline_data_addr(inode, ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));

		set_inode_flag(inode, FI_INLINE_DATA);
		set_inode_flag(inode, FI_DATA_EXIST);

		set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		/* prev inline, next not: drop the stale inline area */
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		f2fs_truncate_inline_inode(inode, ipage, 0);
		clear_inode_flag(inode, FI_INLINE_DATA);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		/* prev not inline, next inline: free old blocks first,
		 * then recover the inline payload via process_inline */
		if (f2fs_truncate_blocks(inode, 0, false, false))
			return false;
		goto process_inline;
	}
	return false;
}
/*
 * Look up @fname in the inline dentry area of directory @dir.
 * On a hit, returns the dentry and stores the (unlocked, referenced)
 * inode page in *res_page for the caller to release.  On a miss,
 * returns NULL with the page released; on node-page lookup failure,
 * returns NULL with the ERR_PTR stored in *res_page.
 */
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			struct fscrypt_name *fname, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	void *inline_dentry;
	f2fs_hash_t namehash;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		*res_page = ipage;
		return NULL;
	}

	namehash = f2fs_dentry_hash(&name, fname);

	inline_dentry = inline_data_addr(dir, ipage);

	make_dentry_ptr_inline(dir, &d, inline_dentry);
	de = f2fs_find_target_dentry(fname, namehash, NULL, &d);
	/* unlock, but keep a reference while the caller uses *res_page */
	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	return de;
}
/*
 * Initialize the inline dentry area of a fresh directory @inode with
 * the "." and ".." entries (via f2fs_do_make_empty_dir) and grow
 * i_size to cover the inline area.  Always returns 0.
 */
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);
	f2fs_do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA(inode))
		f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
	return 0;
}
/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
/*
 * Convert an inline directory by copying its dentries, in place, into
 * a freshly reserved dentry block (bitmap, dentry slots and filenames
 * are memcpy'd, preserving hash positions).  Used for low-level dirs
 * (i_dir_level == 0); see f2fs_move_rehashed_dirents for the other case.
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr src, dst;
	int err;

	page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
	if (!page) {
		/* per the NOTE above, we own releasing ipage on error */
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	/* freshly reserved block must be NEW_ADDR; else on-disk corruption */
	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(&dn);
		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
		f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
			"run fsck to fix.",
			__func__, dir->i_ino, dn.data_blkaddr);
		err = -EINVAL;
		goto out;
	}

	f2fs_wait_on_page_writeback(page, DATA, true);

	dentry_blk = page_address(page);

	make_dentry_ptr_inline(dir, &src, inline_dentry);
	make_dentry_ptr_block(dir, &dst, dentry_blk);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
	memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
	/*
	 * we do not need to zero out remainder part of dentry and filename
	 * field, since we have used bitmap for marking the usage status of
	 * them, besides, we can also ignore copying/zeroing reserved space
	 * of dentry block, because them haven't been used so far.
	 */
	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	f2fs_truncate_inline_inode(dir, ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/* the dir now has one full dentry block at depth 1 */
	f2fs_i_depth_write(dir, 1);
	if (i_size_read(dir) < PAGE_SIZE)
		f2fs_i_size_write(dir, PAGE_SIZE);
out:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * Re-insert every live entry of the backed-up inline dentry area
 * @inline_dentry into @dir through the regular (hashed, multi-level)
 * add path.  On failure, drops everything added so far by truncating
 * the directory's data.  Returns 0 on success or a negative errno.
 */
static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
{
	struct f2fs_dentry_ptr d;
	unsigned long bit_pos = 0;
	int err = 0;

	make_dentry_ptr_inline(dir, &d, inline_dentry);

	while (bit_pos < d.max) {
		struct f2fs_dir_entry *de;
		struct qstr new_name;
		nid_t ino;
		umode_t fake_mode;

		/* skip unused slots */
		if (!test_bit_le(bit_pos, d.bitmap)) {
			bit_pos++;
			continue;
		}

		de = &d.dentry[bit_pos];

		/* zero-length name in a set bit: skip defensively */
		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		new_name.name = d.filename[bit_pos];
		new_name.len = le16_to_cpu(de->name_len);

		ino = le32_to_cpu(de->ino);
		/* only the file-type bits matter for the new entry */
		fake_mode = f2fs_get_de_type(de) << S_SHIFT;

		err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
							ino, fake_mode);
		if (err)
			goto punch_dentry_pages;

		/* long names occupy multiple consecutive slots */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return 0;
punch_dentry_pages:
	/* undo the partial conversion: drop all dentry pages/blocks */
	truncate_inode_pages(&dir->i_data, 0);
	f2fs_truncate_blocks(dir, 0, false, false);
	f2fs_remove_dirty_inode(dir);
	return err;
}
/*
 * Convert an inline directory with i_dir_level set by re-hashing: back
 * up the inline dentry area, wipe it, then re-add every entry through
 * the regular multi-level hash path.  On failure the backup is copied
 * back and the inline state restored.  Releases @ipage on all paths
 * (see the NOTE above f2fs_move_inline_dirents) except success, where
 * the caller's locked ipage is re-taken and kept.
 */
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	void *backup_dentry;
	int err;

	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
				MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
	if (!backup_dentry) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
	f2fs_truncate_inline_inode(dir, ipage, 0);

	/* drop the page lock: re-adding entries allocates node/data pages */
	unlock_page(ipage);

	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);
	kfree(backup_dentry);
	return 0;
recover:
	/* restore the inline payload and inline-sized geometry */
	lock_page(ipage);
	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}
  413. static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
  414. void *inline_dentry)
  415. {
  416. if (!F2FS_I(dir)->i_dir_level)
  417. return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
  418. else
  419. return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
  420. }
/*
 * Add an entry for @new_name -> @ino into the inline dentry area of
 * @dir, optionally initializing @inode's metadata.  If the inline area
 * is full, the directory is converted to regular dentry blocks and
 * -EAGAIN is returned so the caller retries via the normal add path.
 * Returns 0 on success or a negative errno.
 */
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
				const struct qstr *orig_name,
				struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	void *inline_dentry = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(new_name->len);
	struct page *page = NULL;
	int err = 0;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
	if (bit_pos >= d.max) {
		err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
		/* on error the convert helpers release ipage themselves,
		 * so a bare return is correct here */
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = f2fs_init_inode_metadata(inode, dir, new_name,
						orig_name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE, true);

	name_hash = f2fs_dentry_hash(new_name, NULL);
	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);
		f2fs_put_page(page, 1);
	}

	f2fs_update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);
out:
	f2fs_put_page(ipage, 1);
	return err;
}
/*
 * Delete @dentry from the inline dentry area of @dir stored in the
 * inode @page: clear the entry's bitmap slots, dirty the node page,
 * update the directory's times and, if @inode is given, drop its link
 * count.  @page arrives unlocked and is released here.
 */
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE, true);

	inline_dentry = inline_data_addr(dir, page);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	/* @dentry points inside d.dentry[], so its index is the bit pos */
	bit_pos = dentry - d.dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i, d.bitmap);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	dir->i_ctime = dir->i_mtime = current_time(dir);
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}
/*
 * Return true when the inline directory @dir contains no entries other
 * than "." and ".." (slots 0 and 1, hence scanning from bit 2).
 * Returns false as well when the inode page cannot be read.
 */
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	/* start past "." and ".." which always occupy the first two slots */
	unsigned int bit_pos = 2;
	void *inline_dentry;
	struct f2fs_dentry_ptr d;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < d.max)
		return false;

	return true;
}
/*
 * readdir for an inline directory: emit all inline dentries into @ctx.
 * ctx->pos == d.max marks end-of-directory.  Returns 0 on success or
 * when already at EOF, negative errno otherwise.
 */
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct fscrypt_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;
	void *inline_dentry = NULL;
	int err;

	/* NULL payload is fine here: only d.max is consulted for EOF */
	make_dentry_ptr_inline(inode, &d, inline_dentry);

	if (ctx->pos == d.max)
		return 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
	if (!err)
		ctx->pos = d.max;

	f2fs_put_page(ipage, 1);
	/* f2fs_fill_dentries may return a positive "stop" value: not an error */
	return err < 0 ? err : 0;
}
/*
 * FIEMAP for inline data: report the inline payload as a single
 * DATA_INLINE | NOT_ALIGNED | LAST extent whose physical address is the
 * byte offset of the inline area inside the on-disk inode block.
 * Returns 0, -EAGAIN when the inode is not (or no longer) inline, or a
 * negative errno.
 */
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	/* extent length is capped by both the inline area and i_size */
	ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
	if (start >= ilen)
		goto out;
	/* clamp to the requested [start, start+len) window */
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
	if (err)
		goto out;

	/* physical address = node block address + offset of inline data
	 * within the inode block */
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(inode, ipage) -
					(char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
	f2fs_put_page(ipage, 1);
	return err;
}