qgroup.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include "ulist.h"
#include "delayed-ref.h"

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup splits into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    This affects how the qgroup limit works.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether the qgroup numbers will
 *    stay consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but that consumes extra CPU time, so it should be avoided if possible.
 *
 * 3) Account
 *    Btrfs qgroup updates its numbers based on the dirty extents traced
 *    in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 */
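
/*
 * Illustrative sketch of the three parts above, using declarations from
 * this header (not a real call chain; error handling elided):
 *
 *	1) Reserve: btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	2) Trace:   btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
 *	                                      num_bytes, GFP_NOFS);
 *	3) Account: btrfs_qgroup_account_extents(trans);
 *	   (normally done for us at transaction commit time)
 */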

/*
 * Record a dirty extent, and inform qgroup to update the quota on it.
 * TODO: Use a kmem cache to allocate it.
 */
struct btrfs_qgroup_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;
	struct ulist *old_roots;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Since qgroup data is only updated at transaction commit time,
 *	reserved space for metadata must be kept until the transaction
 *	commits.
 *	Any metadata reservation that is used in btrfs_start_transaction()
 *	should be of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, and then btrfs_join_transaction() is called to get a
 *	trans handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After join_transaction(), part (or all) of such a reservation should
 *	be converted into META_PERTRANS.
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA = 0,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};
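
/*
 * Illustrative sketch of the META_PREALLOC -> META_PERTRANS flow described
 * above (an assumed caller-side pattern; error handling simplified):
 *
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, nbytes, true);
 *	if (ret < 0)
 *		return ret;
 *	trans = btrfs_join_transaction(root);
 *	if (IS_ERR(trans)) {
 *		btrfs_qgroup_free_meta_prealloc(root, nbytes);
 *		return PTR_ERR(trans);
 *	}
 *	(the reservation must now live until commit, so convert it)
 *	btrfs_qgroup_convert_reserved_meta(root, nbytes);
 */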

/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type should have different reservation behavior.
 * E.g. data follows its io_tree flag modification, while
 * *currently* meta is just reserve-and-clear during the transaction.
 *
 * TODO: Add a new type for reservations which can survive transaction
 * commit.  The current metadata reservation behavior is not suitable for
 * such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is a member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * Temp variables for accounting operations.
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};

/*
 * For qgroup event trace points only
 */
#define QGROUP_RESERVE		(1<<0)
#define QGROUP_RELEASE		(1<<1)
#define QGROUP_FREE		(1<<2)

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info);
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);

struct btrfs_delayed_extent_op;
  151. struct btrfs_delayed_extent_op;
/*
 * Inform qgroup to trace one dirty extent; its info is recorded in @record,
 * so qgroup can account for it at transaction commit time.
 *
 * No-lock version: the caller must hold the delayed ref lock and have
 * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
 * exiting the lock context.
 *
 * Return 0 for a successful insert.
 * Return >0 if the record already exists; the caller can free @record safely.
 * Errors are not possible.
 */
int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record);
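
/*
 * Illustrative call pattern for the no-lock version (a sketch; allocation
 * and error handling are simplified, and @delayed_refs->lock is assumed to
 * be the relevant spinlock):
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	...
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 *	if (ret > 0)		(record already exists, free ours)
 *		kfree(record);
 *	else
 *		btrfs_qgroup_trace_extent_post(fs_info, record);
 */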

/*
 * Post handler after qgroup_trace_extent_nolock().
 *
 * NOTE: Qgroup currently does the expensive backref walk at transaction
 * commit time with TRANS_STATE_COMMIT_DOING, which blocks incoming new
 * transactions.
 * This is designed to allow btrfs_find_all_roots() to get a correct
 * new_roots result.
 *
 * However, for old_roots there is no need to do the backref walk at that
 * time, since we search commit roots for the backref walk and the result
 * will always be correct.
 *
 * Due to the nature of the no-lock version, we can't do the backref walk
 * there, so we must call btrfs_qgroup_trace_extent_post() after exiting
 * the spinlock context.
 *
 * TODO: If we can fix and prove that btrfs_find_all_roots() can get a
 * correct result using the current root, then we can move all the expensive
 * backref walking out of transaction commit, but not now, as qgroup
 * accounting would be wrong again.
 */
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord);

/*
 * Inform qgroup to trace one dirty extent, specified by @bytenr and
 * @num_bytes, so qgroup can account for it at transaction commit time.
 *
 * Better encapsulated version, with memory allocation and the backref
 * walk for commit roots.
 * So this can sleep.
 *
 * Return 0 if the operation is done.
 * Return <0 for an error, like a memory allocation failure or an invalid
 * parameter (NULL trans).
 */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
		gfp_t gfp_flag);
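
/*
 * Example use (a sketch; @ins is assumed to be the btrfs_key describing a
 * newly allocated extent):
 *
 *	ret = btrfs_qgroup_trace_extent(trans, fs_info, ins.objectid,
 *					ins.offset, GFP_NOFS);
 *	if (ret < 0)
 *		return ret;
 */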

/*
 * Inform qgroup to trace all leaf items of data.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM).
 */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct extent_buffer *eb);

/*
 * Inform qgroup to trace a whole subtree, including all its child tree
 * blocks and data.
 * The root tree block is specified by @root_eb.
 *
 * Normally used by relocation (tree block swap) and subvolume deletion.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM or tree search error).
 */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
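
/*
 * Example (a sketch): trace a whole subtree before it is dropped, so the
 * extents it owns are accounted at the next commit:
 *
 *	ret = btrfs_qgroup_trace_subtree(trans, root, root_eb,
 *					 btrfs_header_generation(root_eb),
 *					 btrfs_header_level(root_eb));
 */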

int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 num_bytes,
				struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);
static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
						 u64 ref_root, u64 num_bytes)
{
	trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
	btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes,
				  BTRFS_QGROUP_RSV_DATA);
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct inode *inode,
			      struct extent_changeset **reserved,
			      u64 start, u64 len);
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len);
int btrfs_qgroup_free_data(struct inode *inode,
			   struct extent_changeset *reserved,
			   u64 start, u64 len);
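
/*
 * Illustrative write-path flow for the data reserve API (a sketch;
 * @write_succeeded is a made-up condition, error handling elided):
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	...
 *	if (write_succeeded)
 *		btrfs_qgroup_release_data(inode, start, len);
 *	else
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 *	extent_changeset_free(reserved);
 *
 * Roughly: release keeps the space accounted (the data made it to disk),
 * while free returns the reserved space (e.g. on error or truncation).
 */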

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce);

/* Reserve metadata space for the pertrans and prealloc types */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
						     int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PERTRANS,
					   enforce);
}

static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
						     int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PREALLOC,
					   enforce);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type);

/* Free the per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservations can be freed as needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PREALLOC);
}

/*
 * Per-transaction meta reservations should all be freed at transaction
 * commit time.
 */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);

/*
 * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
 *
 * This is called when a preallocated meta reservation needs to be used,
 * normally after a btrfs_join_transaction() call.
 */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);

void btrfs_qgroup_check_reserved_leak(struct inode *inode);

#endif /* BTRFS_QGROUP_H */