/*
 * fs/f2fs/inline.c
 *
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

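/*
 * An inode may keep its data inline only when the INLINE_DATA mount
 * option is set, the inode owns (almost) no blocks beyond the inode
 * block and an optional xattr node block, and i_size fits within the
 * MAX_INLINE_DATA bytes of the inline area.
 */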
bool f2fs_may_inline(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t nr_blocks;
	loff_t i_size;

	if (!test_opt(sbi, INLINE_DATA))
		return false;

	nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
	if (inode->i_blocks > nr_blocks)
		return false;

	i_size = i_size_read(inode);
	if (i_size > MAX_INLINE_DATA)
		return false;

	return true;
}

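/*
 * Fill a page-cache page from the inline area of the inode block.
 * Only page index 0 can carry inline data; any other index is simply
 * zeroed out. The page is unlocked on every path and marked uptodate
 * on success.
 */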
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *ipage;
	void *src_addr, *dst_addr;

	if (page->index) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	f2fs_put_page(ipage, 1);
out:
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

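/*
 * Convert an inline inode into a regular one: reserve the first data
 * block, copy the inline bytes into @page, and write the page back
 * before wiping the inline area and clearing FI_INLINE_DATA, so the
 * data stays reachable if we crash mid-conversion.
 */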
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err = 0;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	/* someone else converted inline_data already */
	if (!f2fs_has_inline_data(inode))
		goto out;

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return err;
}

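/*
 * Entry point for conversion: a no-op unless the inode has inline data
 * that @to_size would overflow. If the caller's @page is absent or not
 * index 0, page 0 of the mapping is grabbed temporarily instead.
 */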
int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size,
						struct page *page)
{
	struct page *new_page = page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	if (!page || page->index != 0) {
		new_page = grab_cache_page(inode->i_mapping, 0);
		if (!new_page)
			return -ENOMEM;
	}

	err = __f2fs_convert_inline_data(inode, new_page);
	if (!page || page->index != 0)
		f2fs_put_page(new_page, 1);
	return err;
}

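/*
 * Copy @size bytes from @page into the inline area of the inode block.
 * If the first data block had been allocated (the inode was converted
 * earlier), release it and set FI_INLINE_DATA again.
 */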
int f2fs_write_inline_data(struct inode *inode,
				struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	f2fs_wait_on_page_writeback(ipage, NODE);
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

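/*
 * Truncate inline data to @from bytes by zeroing the tail of the
 * inline area; offsets at or beyond MAX_INLINE_DATA need no work here.
 */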
void truncate_inline_data(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *ipage;

	if (from >= MAX_INLINE_DATA)
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return;

	f2fs_wait_on_page_writeback(ipage, NODE);

	zero_user_segment(ipage, INLINE_DATA_OFFSET + from,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
}

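/*
 * Roll-forward recovery hook for inline data; the policy table below
 * spells out the four flag combinations. Returns true when inline data
 * was recovered and the caller can skip data-block recovery.
 */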
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));
		f2fs_wait_on_page_writeback(ipage, NODE);
		zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		goto process_inline;
	}
	return false;
}