inline.c 5.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224
/*
 * fs/f2fs/inline.c
 *
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  10. #include <linux/fs.h>
  11. #include <linux/f2fs_fs.h>
  12. #include "f2fs.h"
  13. bool f2fs_may_inline(struct inode *inode)
  14. {
  15. struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
  16. block_t nr_blocks;
  17. loff_t i_size;
  18. if (!test_opt(sbi, INLINE_DATA))
  19. return false;
  20. nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
  21. if (inode->i_blocks > nr_blocks)
  22. return false;
  23. i_size = i_size_read(inode);
  24. if (i_size > MAX_INLINE_DATA)
  25. return false;
  26. return true;
  27. }
  28. int f2fs_read_inline_data(struct inode *inode, struct page *page)
  29. {
  30. struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
  31. struct page *ipage;
  32. void *src_addr, *dst_addr;
  33. if (page->index) {
  34. zero_user_segment(page, 0, PAGE_CACHE_SIZE);
  35. goto out;
  36. }
  37. ipage = get_node_page(sbi, inode->i_ino);
  38. if (IS_ERR(ipage)) {
  39. unlock_page(page);
  40. return PTR_ERR(ipage);
  41. }
  42. zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
  43. /* Copy the whole inline data block */
  44. src_addr = inline_data_addr(ipage);
  45. dst_addr = kmap(page);
  46. memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
  47. kunmap(page);
  48. f2fs_put_page(ipage, 1);
  49. out:
  50. SetPageUptodate(page);
  51. unlock_page(page);
  52. return 0;
  53. }
  54. static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
  55. {
  56. int err;
  57. struct page *ipage;
  58. struct dnode_of_data dn;
  59. void *src_addr, *dst_addr;
  60. block_t new_blk_addr;
  61. struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
  62. struct f2fs_io_info fio = {
  63. .type = DATA,
  64. .rw = WRITE_SYNC | REQ_PRIO,
  65. };
  66. f2fs_lock_op(sbi);
  67. ipage = get_node_page(sbi, inode->i_ino);
  68. if (IS_ERR(ipage))
  69. return PTR_ERR(ipage);
  70. /*
  71. * i_addr[0] is not used for inline data,
  72. * so reserving new block will not destroy inline data
  73. */
  74. set_new_dnode(&dn, inode, ipage, NULL, 0);
  75. err = f2fs_reserve_block(&dn, 0);
  76. if (err) {
  77. f2fs_unlock_op(sbi);
  78. return err;
  79. }
  80. zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
  81. /* Copy the whole inline data block */
  82. src_addr = inline_data_addr(ipage);
  83. dst_addr = kmap(page);
  84. memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
  85. kunmap(page);
  86. SetPageUptodate(page);
  87. /* write data page to try to make data consistent */
  88. set_page_writeback(page);
  89. write_data_page(page, &dn, &new_blk_addr, &fio);
  90. update_extent_cache(new_blk_addr, &dn);
  91. f2fs_wait_on_page_writeback(page, DATA);
  92. /* clear inline data and flag after data writeback */
  93. zero_user_segment(ipage, INLINE_DATA_OFFSET,
  94. INLINE_DATA_OFFSET + MAX_INLINE_DATA);
  95. clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
  96. stat_dec_inline_inode(inode);
  97. sync_inode_page(&dn);
  98. f2fs_put_dnode(&dn);
  99. f2fs_unlock_op(sbi);
  100. return err;
  101. }
  102. int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
  103. {
  104. struct page *page;
  105. int err;
  106. if (!f2fs_has_inline_data(inode))
  107. return 0;
  108. else if (to_size <= MAX_INLINE_DATA)
  109. return 0;
  110. page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
  111. if (!page)
  112. return -ENOMEM;
  113. err = __f2fs_convert_inline_data(inode, page);
  114. f2fs_put_page(page, 1);
  115. return err;
  116. }
  117. int f2fs_write_inline_data(struct inode *inode,
  118. struct page *page, unsigned size)
  119. {
  120. void *src_addr, *dst_addr;
  121. struct page *ipage;
  122. struct dnode_of_data dn;
  123. int err;
  124. set_new_dnode(&dn, inode, NULL, NULL, 0);
  125. err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
  126. if (err)
  127. return err;
  128. ipage = dn.inode_page;
  129. zero_user_segment(ipage, INLINE_DATA_OFFSET,
  130. INLINE_DATA_OFFSET + MAX_INLINE_DATA);
  131. src_addr = kmap(page);
  132. dst_addr = inline_data_addr(ipage);
  133. memcpy(dst_addr, src_addr, size);
  134. kunmap(page);
  135. /* Release the first data block if it is allocated */
  136. if (!f2fs_has_inline_data(inode)) {
  137. truncate_data_blocks_range(&dn, 1);
  138. set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
  139. stat_inc_inline_inode(inode);
  140. }
  141. sync_inode_page(&dn);
  142. f2fs_put_dnode(&dn);
  143. return 0;
  144. }
  145. int recover_inline_data(struct inode *inode, struct page *npage)
  146. {
  147. struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
  148. struct f2fs_inode *ri = NULL;
  149. void *src_addr, *dst_addr;
  150. struct page *ipage;
  151. /*
  152. * The inline_data recovery policy is as follows.
  153. * [prev.] [next] of inline_data flag
  154. * o o -> recover inline_data
  155. * o x -> remove inline_data, and then recover data blocks
  156. * x o -> remove inline_data, and then recover inline_data
  157. * x x -> recover data blocks
  158. */
  159. if (IS_INODE(npage))
  160. ri = F2FS_INODE(npage);
  161. if (f2fs_has_inline_data(inode) &&
  162. ri && ri->i_inline & F2FS_INLINE_DATA) {
  163. process_inline:
  164. ipage = get_node_page(sbi, inode->i_ino);
  165. f2fs_bug_on(IS_ERR(ipage));
  166. src_addr = inline_data_addr(npage);
  167. dst_addr = inline_data_addr(ipage);
  168. memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
  169. update_inode(inode, ipage);
  170. f2fs_put_page(ipage, 1);
  171. return -1;
  172. }
  173. if (f2fs_has_inline_data(inode)) {
  174. ipage = get_node_page(sbi, inode->i_ino);
  175. f2fs_bug_on(IS_ERR(ipage));
  176. zero_user_segment(ipage, INLINE_DATA_OFFSET,
  177. INLINE_DATA_OFFSET + MAX_INLINE_DATA);
  178. clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
  179. update_inode(inode, ipage);
  180. f2fs_put_page(ipage, 1);
  181. } else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
  182. truncate_blocks(inode, 0);
  183. set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
  184. goto process_inline;
  185. }
  186. return 0;
  187. }