btrfs_inode.h 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354
  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 02110-1301, USA.
  17. */
  18. #ifndef __BTRFS_I__
  19. #define __BTRFS_I__
  20. #include <linux/hash.h>
  21. #include "extent_map.h"
  22. #include "extent_io.h"
  23. #include "ordered-data.h"
  24. #include "delayed-inode.h"
/*
 * Bit numbers for btrfs_inode::runtime_flags, manipulated with
 * set_bit()/clear_bit()/test_bit() (see e.g. btrfs_inode_block_unlocked_dio()
 * below).  These are in-memory state only and are never written to disk.
 *
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero.  When it is set
 * the btrfs file release call will add this inode to the
 * ordered operations list so that we make sure to flush out any
 * new data the application may have written before commit.
 */
#define BTRFS_INODE_ORDERED_DATA_CLOSE 0
/* Metadata space for the orphan item has been reserved — TODO confirm in inode.c */
#define BTRFS_INODE_ORPHAN_META_RESERVED 1
/* Inode is a placeholder/test inode, not backed by on-disk items — verify usage */
#define BTRFS_INODE_DUMMY 2
/* Inode is currently queued for (auto)defrag */
#define BTRFS_INODE_IN_DEFRAG 3
/* Metadata reservation for delalloc has been made — see delalloc_mutex above callers */
#define BTRFS_INODE_DELALLOC_META_RESERVED 4
/* An orphan item exists on disk for this inode */
#define BTRFS_INODE_HAS_ORPHAN_ITEM 5
/* Async (compressed) extent submission is in flight for this inode */
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
/* The next fsync of this inode must log the whole inode, not just changes */
#define BTRFS_INODE_NEEDS_FULL_SYNC 7
/* The next log operation must copy all items of the inode */
#define BTRFS_INODE_COPY_EVERYTHING 8
/* Inode is linked into its root's delalloc_inodes list */
#define BTRFS_INODE_IN_DELALLOC_LIST 9
/*
 * Force new DIO readers to take i_mutex; set/cleared by the two helpers at
 * the bottom of this file.
 */
#define BTRFS_INODE_READDIO_NEED_LOCK 10
/* Inode has property items (xattr-backed btrfs properties) */
#define BTRFS_INODE_HAS_PROPS 11
/*
 * In-memory btrfs inode.  Embeds the generic VFS inode (vfs_inode) as its
 * last member so BTRFS_I() can recover it with container_of().
 */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

	/*
	 * key used to find this inode on disk.  This is used by the code
	 * to read in roots of subvolumes
	 */
	struct btrfs_key location;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans).
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/*
	 * special utility tree used to record which mirrors have already been
	 * tried when checksums fail for a given block
	 */
	struct extent_io_tree io_failure_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* held while doing delalloc reservations */
	struct mutex delalloc_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;

	/*
	 * list of all the delalloc inodes in the FS.  There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * to walk them all.
	 */
	struct list_head delalloc_inodes;

	/* node for the red-black tree that links inodes in subvolume root */
	struct rb_node rb_node;

	/* BTRFS_INODE_* runtime state bits (see defines above) */
	unsigned long runtime_flags;

	/* Keep track of who's O_SYNC/fsyncing currently */
	atomic_t sync_writers;

	/*
	 * full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/* transid of the trans_handle that last modified this inode */
	u64 last_trans;

	/* transid that last logged this inode */
	u64 logged_trans;

	/* log transid when this inode was last modified */
	int last_sub_trans;

	/* a local copy of root's last_log_commit */
	int last_log_commit;

	/*
	 * total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
	u64 delalloc_bytes;

	/*
	 * Total number of bytes pending delalloc that fall within a file
	 * range that is either a hole or beyond EOF (and no prealloc extent
	 * exists in the range).  This is always <= delalloc_bytes.
	 */
	u64 new_delalloc_bytes;

	/*
	 * total number of bytes pending defrag, used by stat to check whether
	 * it needs COW.
	 */
	u64 defrag_bytes;

	/*
	 * the size of the file stored in the metadata on disk.  data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet.
	 */
	u64 disk_i_size;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove */
	u64 dir_index;

	/*
	 * the fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged.  See tree-log.c for all the details.
	 */
	u64 last_unlink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums.  This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;

	/*
	 * Counters to keep track of the number of extent items we may use due
	 * to delalloc and such.  outstanding_extents is the number of extent
	 * items we think we'll end up using, and reserved_extents is the number
	 * of extent items we've reserved metadata for.
	 */
	unsigned outstanding_extents;
	unsigned reserved_extents;

	/* always compress this one file */
	unsigned force_compress;

	/* lazily allocated node for the delayed-inode machinery (delayed-inode.h) */
	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	struct timespec i_otime;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;
	long delayed_iput_count;

	/*
	 * To avoid races between lockless (i_mutex not held) direct IO writes
	 * and concurrent fsync requests.  Direct IO writes must acquire read
	 * access on this semaphore for creating an extent map and its
	 * corresponding ordered extent.  The fast fsync path must acquire write
	 * access on this semaphore before it collects ordered extents and
	 * extent maps.
	 */
	struct rw_semaphore dio_sem;

	/* the embedded VFS inode; keep as the container_of() anchor for BTRFS_I() */
	struct inode vfs_inode;
};
  170. extern unsigned char btrfs_filetype_table[];
  171. static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
  172. {
  173. return container_of(inode, struct btrfs_inode, vfs_inode);
  174. }
  175. static inline unsigned long btrfs_inode_hash(u64 objectid,
  176. const struct btrfs_root *root)
  177. {
  178. u64 h = objectid ^ (root->objectid * GOLDEN_RATIO_PRIME);
  179. #if BITS_PER_LONG == 32
  180. h = (h >> 32) ^ (h & 0xffffffff);
  181. #endif
  182. return (unsigned long)h;
  183. }
  184. static inline void btrfs_insert_inode_hash(struct inode *inode)
  185. {
  186. unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);
  187. __insert_inode_hash(inode, h);
  188. }
  189. static inline u64 btrfs_ino(struct btrfs_inode *inode)
  190. {
  191. u64 ino = inode->location.objectid;
  192. /*
  193. * !ino: btree_inode
  194. * type == BTRFS_ROOT_ITEM_KEY: subvol dir
  195. */
  196. if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
  197. ino = inode->vfs_inode.i_ino;
  198. return ino;
  199. }
  200. static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
  201. {
  202. i_size_write(&inode->vfs_inode, size);
  203. inode->disk_i_size = size;
  204. }
  205. static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
  206. {
  207. struct btrfs_root *root = inode->root;
  208. if (root == root->fs_info->tree_root &&
  209. btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
  210. return true;
  211. if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
  212. return true;
  213. return false;
  214. }
/*
 * Return 1 if the inode's changes up to @generation are already fully
 * captured in the tree log (so fsync can skip logging it), 0 otherwise.
 * All the fields read here are protected by inode->lock (see the comment
 * on that field in struct btrfs_inode).
 */
static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	int ret = 0;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= inode->root->last_log_commit) {
		/*
		 * After a ranged fsync we might have left some extent maps
		 * (that fall outside the fsync's range).  So return false
		 * here if the list isn't empty, to make sure btrfs_log_inode()
		 * will be called and process those extent maps.
		 */
		smp_mb();
		if (list_empty(&inode->extent_tree.modified_extents))
			ret = 1;
	}
	spin_unlock(&inode->lock);
	return ret;
}
/* Flag bit for btrfs_dio_private::flags: the original bio has been submitted */
#define BTRFS_DIO_ORIG_BIO_SUBMITTED 0x1

/* Per-request state for a btrfs direct-IO submission. */
struct btrfs_dio_private {
	/* inode the direct IO targets */
	struct inode *inode;
	/* BTRFS_DIO_* flag bits */
	unsigned long flags;
	/* file offset of the IO */
	u64 logical_offset;
	/* physical (chunk-tree) start of the IO */
	u64 disk_bytenr;
	/* length of the IO in bytes */
	u64 bytes;
	/* opaque pointer for the submitter — ownership not visible here */
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	/* orig_bio is our btrfs_io_bio */
	struct bio *orig_bio;

	/* dio_bio came from fs/direct-io.c */
	struct bio *dio_bio;

	/*
	 * The original bio may be split to several sub-bios, this is
	 * done during endio of sub-bios
	 */
	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
			blk_status_t);
};
/*
 * Disable DIO read nolock optimization, so new dio readers will be forced
 * to grab i_mutex.  It is used to avoid the endless truncate due to
 * nonlocked dio read.
 */
static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
	/*
	 * Full barrier after setting the bit — pairs with the barrier in
	 * btrfs_inode_resume_unlocked_dio(); presumably orders the bit set
	 * against the reader-side checks in the DIO path (TODO: confirm the
	 * exact pairing site in inode.c).
	 */
	smp_mb();
}
/*
 * Re-enable the DIO read nolock optimization disabled by
 * btrfs_inode_block_unlocked_dio().
 */
static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
{
	/* Order prior stores before clearing the bit (clear_bit is not a barrier). */
	smp_mb__before_atomic();
	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
}
/*
 * Rate-limited warning for a data checksum mismatch at @logical_start.
 * Two format strings are needed because the root objectid is deliberately
 * printed signed (%lld) for high objectids — see the comment below — and
 * printk format strings must be literals for the format checker.
 */
static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u32 csum, u32 csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;

	/* Output minus objectid, which is more meaningful */
	if (root->objectid >= BTRFS_LAST_FREE_OBJECTID)
		btrfs_warn_rl(root->fs_info,
	"csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d",
			root->objectid, btrfs_ino(inode),
			logical_start, csum, csum_expected, mirror_num);
	else
		btrfs_warn_rl(root->fs_info,
	"csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d",
			root->objectid, btrfs_ino(inode),
			logical_start, csum, csum_expected, mirror_num);
}
  289. bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end);
  290. #endif