// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
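
/*
 * Compare two delayed ref nodes that share a bytenr: order by ref type
 * first, then by the type-specific fields (root for keyed refs, parent for
 * shared refs, plus objectid/offset for data refs), and finally, when
 * check_seq is set, by sequence number so that later refs sort after
 * earlier ones.
 */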
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
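
/*
 * Insert a delayed ref node into the head's rbtree, ordered by comp_refs()
 * including the sequence number.  Returns the existing node if an equal one
 * is already linked (the caller then merges the two), or NULL once the new
 * node has been inserted.
 */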
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
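
/*
 * Acquire the mutex of a delayed ref head while holding the delayed_refs
 * spinlock.  If the trylock fails, pin the head with an extra reference,
 * drop the spinlock and sleep on the mutex, then retake the spinlock.
 * Returns -EAGAIN if the head was removed from the rbtree while we slept,
 * in which case the caller must look it up again.
 */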
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
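
/*
 * Unlink a ref node from its head: erase it from the rbtree and the
 * ref_add_list, drop its reference count and update the per-transaction
 * entry accounting.  The head's spinlock must be held.
 */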
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
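
/*
 * Try to fold the nodes that follow @ref in the head's rbtree into @ref.
 * Nodes that compare equal to @ref (ignoring seq) have their ref_mod added
 * when the actions match and subtracted when they differ; a node whose
 * ref_mod reaches zero is dropped entirely.  Nodes with a seq at or above
 * @seq are still visible to a tree mod log user and are left alone.
 * Returns true when @ref itself was dropped or swapped away, in which case
 * the caller must restart its scan of the tree.
 */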
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
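
/*
 * Merge all mergeable delayed refs of a head.  Only metadata heads are
 * worth scanning; data heads are skipped.  The lowest seq on the tree mod
 * seq list acts as a barrier: refs at or above it are still needed by a
 * tree mod log user and must not be merged away.
 */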
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}
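
/*
 * Return 1 if @seq is at or above the lowest seq still registered on the
 * tree mod seq list, meaning the delayed ref it belongs to has to be held
 * back until that tree mod log user is done; return 0 otherwise.
 */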
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
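
/*
 * Pick the next delayed ref head to run, scanning the rbtree from
 * run_delayed_start and wrapping around to the start at most once.  Heads
 * already marked as processing are skipped.  On success the returned head
 * is marked processing and run_delayed_start is advanced past it; NULL is
 * returned when the tree is empty or every head is busy.
 */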
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because the head could be
	 * processed concurrently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}
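
/*
 * Initialize a newly allocated head ref, and the optional qgroup record
 * that goes with it, from the parameters of the modification.  No locks
 * are needed: the head is not yet visible to other threads.
 */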
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(qrecord && head_ref->qgroup_ref_root
			&& head_ref->qgroup_reserved
			&& existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (head_ref->is_data && head_ref->ref_mod < 0)
			delayed_refs->pending_csums += head_ref->num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Info structure of the mounted filesystem.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated,
 *		this can be either one of the well-known metadata trees or
 *		the subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	int ret;
	u8 ref_type;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int ret;
	u8 ref_type;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}
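
/*
 * Queue an extent_op (a key and/or flags update) for an extent without
 * adding or dropping any references: BTRFS_UPDATE_DELAYED_HEAD gives the
 * head a ref_mod of zero, so only the extent_op is carried.
 */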
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}