delayed-ref.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
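
/*
 * Compare two delayed ref nodes of any type: order by ref type first, then
 * by the type-specific fields (root or parent, plus objectid/offset for data
 * refs), and finally by sequence number when check_seq is set.
 */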
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
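
/*
 * Insert a ref node into a head's ref_tree, ordered by comp_refs() including
 * the sequence number.  Returns the existing node if an equal one is already
 * present (so the caller can merge into it), or NULL once the new node has
 * been linked in.
 */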
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
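
/*
 * Lock the mutex of a delayed ref head.  If the trylock fails we take a
 * reference, drop delayed_refs->lock, sleep on the mutex and then re-take
 * the spinlock.  Returns 0 on success, or -EAGAIN if the head was removed
 * from the rbtree while we slept, in which case the caller must start over.
 */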
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
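
/*
 * Remove a ref from its head: erase it from the ref_tree and the add_list,
 * drop the tree's reference on it and update the delayed ref counters.
 */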
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
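
/*
 * Try to merge @ref with the refs that follow it in the head's ref_tree.
 * Refs with the same action have their ref_mod folded together, opposite
 * actions cancel each other out, and merging stops at the first ref that no
 * longer compares equal or whose seq is still in use (>= @seq).  Returns
 * true when the node being merged was dropped or swapped away, so the
 * caller has to restart its walk of the tree.
 */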
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
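
/*
 * Merge mergeable refs hanging off a head.  Only metadata heads are handled,
 * and refs at or after the oldest tree mod log sequence still in use are
 * left alone.
 */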
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}
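
/*
 * Return 1 if delayed refs with this sequence number must still be held
 * back because an older tree mod log sequence is in use, 0 otherwise.
 */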
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
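
/*
 * Pick the next delayed ref head to run, starting at run_delayed_start and
 * wrapping around to the beginning of the rbtree at most once.  Heads that
 * are already being processed are skipped; the returned head is marked as
 * processing and run_delayed_start is advanced past it.
 */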
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
/*
 * Helper to insert a ref node into the head's ref tree, or merge it with an
 * existing node for the same ref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing ref if its ref_mod drops to zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}

	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock here because the head could currently be
	 * under processing; for refs we just added we know we're fine.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int is_system,
		     int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but the existing practice is inconsistent, so follow it for now.
	 * Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					is_system, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, 0, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}
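
/*
 * Record a delayed extent op (flags and/or key update) for this extent by
 * queueing a head with BTRFS_UPDATE_DELAYED_HEAD; no reference count change
 * is made.
 */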
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * extent_ops just modify the flags of an extent and they don't result
	 * in ref count changes, hence it's safe to pass false/0 for is_system
	 * argument
	 */
	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, 0, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
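
/* Tear down the slab caches used by the delayed ref code. */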
void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
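
/*
 * Create the slab caches for delayed ref heads, tree refs, data refs and
 * extent ops.  On failure anything already created is destroyed again.
 */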
int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}