delayed-ref.h

/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF     1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF    2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT  3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

/*
 * XXX: Qu: I really hate the design that ref_head and tree/data ref share the
 * same ref_node structure.
 * Ref_head is at a higher logical level than tree/data refs, and the
 * duplicated bytenr/num_bytes in ref_node is really a waste of memory; they
 * should be referred to from ref_head instead.
 * This gets more disgusting after we use a list to store tree/data refs in
 * ref_head.  Must clean this mess up later.
 */
struct btrfs_delayed_ref_node {
        /* data/tree refs use this list, stored in ref_head->ref_list */
        struct list_head list;
        /*
         * If action is BTRFS_ADD_DELAYED_REF, also link this node to
         * ref_head->ref_add_list, then we do not need to iterate the
         * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
         * (A sketch of how callers pick from ref_add_list follows this
         * struct.)
         */
        struct list_head add_list;

        /* the starting bytenr of the extent */
        u64 bytenr;

        /* the size of the extent */
        u64 num_bytes;

        /* seq number to keep track of insertion order */
        u64 seq;

        /* ref count on this data structure */
        atomic_t refs;

        /*
         * how many refs this entry is adding or deleting.  For
         * head refs, this may be a negative number because it is keeping
         * track of the total mods done to the reference count.
         * For individual refs, this will always be a positive number.
         *
         * It may be more than one, since it is possible for a single
         * parent to have more than one ref on an extent.
         */
        int ref_mod;

        /* one of the BTRFS_*_DELAYED_* action values above */
        unsigned int action:8;
        /* backref key type, or 0 for a head ref */
        unsigned int type:8;
        /* is this node the one embedded in a ref head? */
        unsigned int is_head:1;
        /* is this node still in the rbtree / ref lists? */
        unsigned int in_tree:1;
};
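
/*
 * Illustrative sketch (not part of this header): the point of ref_add_list
 * is that callers running a head want to process BTRFS_ADD_DELAYED_REF
 * entries before drops, so a ref count never transiently hits zero.  The
 * selection logic in extent-tree.c looks roughly like the following; the
 * function name is only an assumption for illustration:
 *
 *        static struct btrfs_delayed_ref_node *
 *        select_delayed_ref(struct btrfs_delayed_ref_head *head)
 *        {
 *                if (list_empty(&head->ref_list))
 *                        return NULL;
 *                if (!list_empty(&head->ref_add_list))
 *                        return list_first_entry(&head->ref_add_list,
 *                                        struct btrfs_delayed_ref_node,
 *                                        add_list);
 *                return list_first_entry(&head->ref_list,
 *                                struct btrfs_delayed_ref_node, list);
 *        }
 */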

/*
 * A delayed extent operation records key and/or flag updates that should be
 * applied to the extent item when the delayed ref for it is run.
 */
struct btrfs_delayed_extent_op {
        /* new key to set in the extent item, if update_key is true */
        struct btrfs_disk_key key;
        /* level of the tree block this operation is for */
        u8 level;
        bool update_key;
        bool update_flags;
        /* does this operation refer to a data extent? */
        bool is_data;
        /* extent flags to set on the extent item, if update_flags is true */
        u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 * (A sketch of the typical select/lock/run/unlock cycle follows the struct.)
 */
struct btrfs_delayed_ref_head {
        struct btrfs_delayed_ref_node node;

        /*
         * the mutex is held while running the refs, and it is also
         * held when checking the sum of reference modifications.
         */
        struct mutex mutex;

        /* protects ref_list/ref_add_list and the ref nodes queued on them */
        spinlock_t lock;
        struct list_head ref_list;
        /* BTRFS_ADD_DELAYED_REF nodes are also accumulated on this list */
        struct list_head ref_add_list;

        /* node in the delayed_ref_root->href_root rbtree, keyed by bytenr */
        struct rb_node href_node;

        /* pending key/flags update to apply when this head is run */
        struct btrfs_delayed_extent_op *extent_op;

        /*
         * This is used to track the final ref_mod from all the refs associated
         * with this head ref; this is not adjusted as delayed refs are run,
         * this is meant to track if we need to do the csum accounting or not.
         */
        int total_ref_mod;

        /*
         * For qgroup reserved space freeing.
         *
         * ref_root and reserved will be recorded after
         * BTRFS_ADD_DELAYED_EXTENT is called.
         * And will be used to free reserved qgroup space at
         * run_delayed_refs() time.
         */
        u64 qgroup_ref_root;
        u64 qgroup_reserved;

        /*
         * when a new extent is allocated, it is just reserved in memory.
         * The actual extent isn't inserted into the extent allocation tree
         * until the delayed ref is processed.  must_insert_reserved is
         * used to flag a delayed ref so the accounting can be updated
         * when a full insert is done.
         *
         * It is possible the extent will be freed before it is ever
         * inserted into the extent allocation tree.  In this case
         * we need to update the in-RAM accounting to properly reflect
         * that the free has happened.
         */
        unsigned int must_insert_reserved:1;
        /* does this head describe a data extent (rather than metadata)? */
        unsigned int is_data:1;
        /* set while a task is running the refs queued on this head */
        unsigned int processing:1;
};
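
/*
 * Illustrative sketch (not part of this header): a typical consumer of head
 * refs, such as the delayed-ref run loop in extent-tree.c, pairs the helpers
 * declared below roughly like this.  Error handling and the actual ref
 * processing are omitted; "trans" is an assumed transaction handle:
 *
 *        struct btrfs_delayed_ref_head *head;
 *
 *        head = btrfs_select_ref_head(trans);
 *        if (head && !btrfs_delayed_ref_lock(trans, head)) {
 *                ... run the refs queued on head->ref_list ...
 *                btrfs_delayed_ref_unlock(head);
 *        }
 */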

struct btrfs_delayed_tree_ref {
        struct btrfs_delayed_ref_node node;
        /* objectid of the tree root that owns the ref (keyed backrefs) */
        u64 root;
        /* bytenr of the parent tree block (shared backrefs), or zero */
        u64 parent;
        /* level of the referenced tree block */
        int level;
};

struct btrfs_delayed_data_ref {
        struct btrfs_delayed_ref_node node;
        /* objectid of the tree root that owns the ref (keyed backrefs) */
        u64 root;
        /* bytenr of the parent tree block (shared backrefs), or zero */
        u64 parent;
        /* objectid of the inode that owns the extent */
        u64 objectid;
        /* file offset at which the extent is referenced */
        u64 offset;
};

struct btrfs_delayed_ref_root {
        /* head ref rbtree */
        struct rb_root href_root;

        /* dirty extent records */
        struct rb_root dirty_extent_root;

        /* this spin lock protects the rbtree and the entries inside */
        spinlock_t lock;

        /* how many delayed ref updates we've queued, used by the
         * throttling code
         */
        atomic_t num_entries;

        /* total number of head nodes in tree */
        unsigned long num_heads;

        /* total number of head nodes ready for processing */
        unsigned long num_heads_ready;

        /* bytes of data extents whose csum deletions are still pending */
        u64 pending_csums;

        /*
         * set when the tree is flushing before a transaction commit,
         * used by the throttling code to decide if new updates need
         * to be run right away
         */
        int flushing;

        /* bytenr at which the next scan for a ref head to run resumes */
        u64 run_delayed_start;

        /*
         * To make qgroup skip the given root.
         * This is for snapshots, as btrfs_qgroup_inherit() will manually
         * modify counters for the snapshot and its source; we should skip
         * the snapshot in new_root/old_roots or it will get calculated twice.
         */
        u64 qgroup_to_skip;
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
        return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
        if (op)
                kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
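
/*
 * Illustrative sketch (not part of this header): how a caller might queue a
 * flags-only extent operation with the helpers above, roughly following the
 * flag-update path in extent-tree.c.  "fs_info", "trans", "bytenr",
 * "num_bytes", "new_flags", "level" and "is_data" are assumed caller state:
 *
 *        struct btrfs_delayed_extent_op *op;
 *        int ret;
 *
 *        op = btrfs_alloc_delayed_extent_op();
 *        if (!op)
 *                return -ENOMEM;
 *        op->flags_to_set = new_flags;
 *        op->update_flags = true;
 *        op->update_key = false;
 *        op->is_data = is_data;
 *        op->level = level;
 *        ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr, num_bytes, op);
 *        if (ret)
 *                btrfs_free_delayed_extent_op(op);
 *        return ret;
 */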

/*
 * Drop a reference on a delayed ref node and, once the count reaches zero,
 * return it to the cache it was allocated from.  A type of 0 means the node
 * is the one embedded in a struct btrfs_delayed_ref_head.
 */
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
        WARN_ON(atomic_read(&ref->refs) == 0);
        if (atomic_dec_and_test(&ref->refs)) {
                WARN_ON(ref->in_tree);
                switch (ref->type) {
                case BTRFS_TREE_BLOCK_REF_KEY:
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY:
                case BTRFS_SHARED_DATA_REF_KEY:
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        break;
                case 0:
                        kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
                        break;
                default:
                        BUG();
                }
        }
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);

static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
        mutex_unlock(&head->mutex);
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq);

/*
 * a node might live in a head or a regular ref, this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
        return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
        WARN_ON(btrfs_delayed_ref_is_head(node));
        return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
        WARN_ON(btrfs_delayed_ref_is_head(node));
        return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
        WARN_ON(!btrfs_delayed_ref_is_head(node));
        return container_of(node, struct btrfs_delayed_ref_head, node);
}
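
/*
 * Illustrative sketch (not part of this header): dispatching on a generic
 * delayed ref node with the helpers above.  The backref key type constants
 * come from ctree.h; the bodies are elided:
 *
 *        if (btrfs_delayed_ref_is_head(node)) {
 *                struct btrfs_delayed_ref_head *head =
 *                        btrfs_delayed_node_to_head(node);
 *                ...
 *        } else if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
 *                   node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
 *                struct btrfs_delayed_tree_ref *ref =
 *                        btrfs_delayed_node_to_tree_ref(node);
 *                ...
 *        } else {
 *                struct btrfs_delayed_data_ref *ref =
 *                        btrfs_delayed_node_to_data_ref(node);
 *                ...
 *        }
 */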

#endif