/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
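
/*
 * An inode may keep its data inline only when the inline_data mount
 * option is set, the file is not an atomic file, its size fits in
 * MAX_INLINE_DATA bytes, and i_blocks covers no more than the inode
 * block, an optional xattr block, and a single data block.
 */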
bool f2fs_may_inline(struct inode *inode)
{
	block_t nr_blocks;
	loff_t i_size;

	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
	if (inode->i_blocks > nr_blocks)
		return false;

	i_size = i_size_read(inode);
	if (i_size > MAX_INLINE_DATA)
		return false;

	return true;
}
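
/*
 * Fill a page cache page from the inline area of the inode block.
 * Inline data only ever backs page index 0, so any other index is
 * simply zeroed; in all success paths the page ends up uptodate and
 * unlocked.
 */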
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;
	void *src_addr, *dst_addr;

	if (page->index) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	f2fs_put_page(ipage, 1);

out:
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
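
/*
 * Turn an inline inode into a regular one: reserve data block 0, copy
 * the inline payload into @page, and write that page out synchronously
 * before wiping the inline area and clearing FI_INLINE_DATA, so the
 * data survives a crash in the middle of the conversion.
 */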
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err = 0;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	/* someone else converted inline_data already */
	if (!f2fs_has_inline_data(inode))
		goto out;

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);
	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return err;
}
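
/*
 * Entry point for conversion: a no-op unless the inode holds inline
 * data and @to_size no longer fits in MAX_INLINE_DATA bytes.
 */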
int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size,
						struct page *page)
{
	struct page *new_page = page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;
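
	/* inline data lives at page index 0; otherwise work on a page-0 copy */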
	if (!page || page->index != 0) {
		new_page = grab_cache_page(inode->i_mapping, 0);
		if (!new_page)
			return -ENOMEM;
	}

	err = __f2fs_convert_inline_data(inode, new_page);
	if (!page || page->index != 0)
		f2fs_put_page(new_page, 1);
	return err;
}
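
/*
 * Copy @size bytes from @page into the inline area of the inode block.
 * If the inode had meanwhile lost its inline flag, data block 0 is
 * released and FI_INLINE_DATA is set again.
 */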
int f2fs_write_inline_data(struct inode *inode,
				struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	f2fs_wait_on_page_writeback(ipage, NODE);
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
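
/*
 * On truncation, zero the inline bytes from @from to the end of the
 * inline area so no stale data survives; a no-op once @from reaches
 * MAX_INLINE_DATA.
 */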
void truncate_inline_data(struct inode *inode, u64 from)
{
	struct page *ipage;

	if (from >= MAX_INLINE_DATA)
		return;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return;

	f2fs_wait_on_page_writeback(ipage, NODE);

	zero_user_segment(ipage, INLINE_DATA_OFFSET + from,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
}
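
/*
 * Roll-forward recovery for inline data: @npage is the inode's node
 * page as found in the recovery log. Returns true when recovery was
 * completed here (inline payload copied into the inode block), false
 * when the caller still has to recover regular data blocks.
 */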
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}
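
	/* [prev] o, [next] x: wipe stale inline data; caller recovers blocks */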
	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
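		/* [prev] x, [next] o: drop old data blocks, then recover inline */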
		truncate_blocks(inode, 0, false);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		goto process_inline;
	}
	return false;
}