inline.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250
  1. /*
  2. * fs/f2fs/inline.c
  3. * Copyright (c) 2013, Intel Corporation
  4. * Authors: Huajun Li <huajun.li@intel.com>
  5. * Haicheng Li <haicheng.li@intel.com>
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/fs.h>
  11. #include <linux/f2fs_fs.h>
  12. #include "f2fs.h"
  13. bool f2fs_may_inline(struct inode *inode)
  14. {
  15. struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
  16. block_t nr_blocks;
  17. loff_t i_size;
  18. if (!test_opt(sbi, INLINE_DATA))
  19. return false;
  20. nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
  21. if (inode->i_blocks > nr_blocks)
  22. return false;
  23. i_size = i_size_read(inode);
  24. if (i_size > MAX_INLINE_DATA)
  25. return false;
  26. return true;
  27. }
/*
 * Fill @page from the inline data area stored in the inode's node page.
 *
 * Only page index 0 can hold inline data; any higher index is zeroed in
 * full.  On success the page is marked uptodate and unlocked.  If the
 * node page cannot be read, @page is unlocked and the error from
 * get_node_page() is returned.
 */
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *ipage;
	void *src_addr, *dst_addr;

	/* inline data lives entirely in page 0; other pages read as holes */
	if (page->index) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	/* the tail beyond the inline area must read as zeroes */
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	f2fs_put_page(ipage, 1);
out:
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
/*
 * Move an inode's inline data out into a real data block.
 *
 * Under f2fs_lock_op(): reserve block 0 for the inode, copy the inline
 * bytes into @page, synchronously write the page out, and only after
 * the writeback completes clear the inline area and FI_INLINE_DATA so
 * a crash mid-conversion never loses the data.
 *
 * @page must be a locked pagecache page at index 0 for this inode.
 * Returns 0 on success or a negative error.
 */
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;
	/*
	 * NOTE(review): on reserve failure we jump out without an explicit
	 * f2fs_put_page(ipage) here — presumably f2fs_reserve_block() drops
	 * the dnode references on error; verify against its implementation.
	 */

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return err;
}
  104. int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
  105. {
  106. struct page *page;
  107. int err;
  108. if (!f2fs_has_inline_data(inode))
  109. return 0;
  110. else if (to_size <= MAX_INLINE_DATA)
  111. return 0;
  112. page = grab_cache_page(inode->i_mapping, 0);
  113. if (!page)
  114. return -ENOMEM;
  115. err = __f2fs_convert_inline_data(inode, page);
  116. f2fs_put_page(page, 1);
  117. return err;
  118. }
/*
 * Store the first @size bytes of @page back into the inode's inline
 * data area (the inline area is zeroed first, so bytes past @size read
 * as zero).  If the inode was not inline on entry, its first data block
 * is released and FI_INLINE_DATA is set, converting it back to inline
 * form.  Returns 0 on success or the error from get_dnode_of_data().
 */
int f2fs_write_inline_data(struct inode *inode,
			struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	/* don't race with an in-flight writeback of the node page */
	f2fs_wait_on_page_writeback(ipage, NODE);
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
  148. void truncate_inline_data(struct inode *inode, u64 from)
  149. {
  150. struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
  151. struct page *ipage;
  152. if (from >= MAX_INLINE_DATA)
  153. return;
  154. ipage = get_node_page(sbi, inode->i_ino);
  155. if (IS_ERR(ipage))
  156. return;
  157. f2fs_wait_on_page_writeback(ipage, NODE);
  158. zero_user_segment(ipage, INLINE_DATA_OFFSET + from,
  159. INLINE_DATA_OFFSET + MAX_INLINE_DATA);
  160. set_page_dirty(ipage);
  161. f2fs_put_page(ipage, 1);
  162. }
/*
 * Reconcile an inode's inline-data state with the recovered node page
 * @npage during crash recovery.
 *
 * Returns -1 when inline data was recovered from @npage (presumably a
 * signal to the caller that per-block recovery can be skipped — verify
 * against the recovery caller), 0 otherwise.
 */
int recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	/* case (o, o): copy the recovered inline bytes over the current ones */
	if (f2fs_has_inline_data(inode) &&
			ri && ri->i_inline & F2FS_INLINE_DATA) {
process_inline:
		/* also reached via goto from the (x, o) case below */
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return -1;
	}

	if (f2fs_has_inline_data(inode)) {
		/* case (o, x): drop the stale inline area and flag */
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));
		f2fs_wait_on_page_writeback(ipage, NODE);
		zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
		/* case (x, o): discard current blocks, then recover inline */
		truncate_blocks(inode, 0);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		goto process_inline;
	}
	return 0;
}
  207. }