/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
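
/*
 * Rough call flow: btrfs_add_delayed_tree_ref() and
 * btrfs_add_delayed_data_ref() each allocate a head and a ref, then under
 * delayed_refs->lock call add_delayed_ref_head() (one head per bytenr,
 * kept in an rbtree) followed by add_delayed_tree_ref() or
 * add_delayed_data_ref(), which append to or merge with the head's ref
 * list via add_delayed_ref_tail_merge().  The queued refs are later
 * drained by btrfs_run_delayed_refs() in extent-tree.c.
 */
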
/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
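
/*
 * The comparators above are used by add_delayed_ref_tail_merge(), which
 * only cares whether the result is zero, i.e. whether two refs describe
 * the same backref and are therefore mergeable.
 */
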
/*
 * insert a new head ref into the head ref rbtree.  Returns the existing
 * entry if one is already present for the same bytenr, or NULL after
 * linking in the new node.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found; the search wraps around to the first entry in the tree
 * when bytenr is past the last head.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}
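
/*
 * Take the per-head mutex without giving up delayed_refs->lock for long.
 * If the trylock fails, pin the head with a refcount, drop the spinlock,
 * block on the mutex, then re-take the spinlock.  Returns 0 with the
 * mutex held, or -EAGAIN if the head was removed from the tree while we
 * slept, in which case the caller must look the head up again.
 */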
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}
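
/*
 * Remove a single delayed ref: unlink a head from the rbtree, or a plain
 * ref from its head's list, then drop its refcount and roll back the
 * bookkeeping counters that were bumped when it was added.
 */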
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}
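
/*
 * Return 1 if processing of the given delayed ref sequence number must be
 * held back because an outstanding tree mod log user (the oldest entry on
 * fs_info->tree_mod_seq_list) still needs to see the state from before
 * that sequence number; return 0 otherwise.
 */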
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}
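
/*
 * Pick the next head to process, scanning upward from run_delayed_start
 * and wrapping around to the start of the rbtree at most once.  Heads
 * already marked as processing are skipped.  On success the chosen head
 * is marked processing and run_delayed_start is advanced past it; NULL
 * is returned when no unprocessed head can be found.
 */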
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert the ref node at the tail of the head's ref list, or to
 * merge it with the current tail.
 *
 * Returns 0 for a plain insert.
 * Returns >0 for a merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr nor is_head */
        if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
            exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                } else
                        mod = -ref->ref_mod;
        }
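
        /*
         * Worked example of the arithmetic above: merging a DROP with
         * ref_mod 2 into an existing ADD with ref_mod 1 flips the action
         * to DROP and leaves ref_mod at 2 - 1 = 1, i.e. a net drop of one
         * reference.  Merging a DROP of 1 into an ADD of 3 keeps the ADD
         * action with ref_mod 3 - 1 = 2.  If the two cancel exactly, the
         * update below drives ref_mod to zero and the tail node is
         * removed entirely.
         */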
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);

        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr.
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation.
         * We only need the lock for this case because the head could be
         * processed concurrently; for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative one, or
         * vice versa, we need to make sure to adjust pending_csums
         * accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *qexisting;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
                                                             qrecord);
                if (qexisting)
                        kfree(qrecord);
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into its head ref's list.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action, int no_quota)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->no_quota = no_quota;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated.
         * But this kind of bad practice exists elsewhere; follow it for
         * now.  Needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into its head ref's list.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action, int no_quota)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->no_quota = no_quota;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
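/*
 * Illustrative sketch of a caller (not verbatim kernel code, and the
 * variable names are made up): when a new tree block is allocated, the
 * extent allocator queues the backref update roughly like this:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
 *					 buf->len, parent, root_objectid,
 *					 level, BTRFS_ADD_DELAYED_EXTENT,
 *					 extent_op, 0);
 *
 * BTRFS_ADD_DELAYED_EXTENT is used for freshly allocated extents so that
 * the reserved-space accounting is updated when the ref is finally run.
 */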
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int no_quota)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                no_quota = 0;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (fs_info->quota_enabled && is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action,
                             no_quota);
        spin_unlock(&delayed_refs->lock);

        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int no_quota)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                no_quota = 0;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (fs_info->quota_enabled && is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action, no_quota);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
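
/*
 * Slab cache setup and teardown.  These are expected to run from the
 * module init/exit paths (init_btrfs_fs()/exit_btrfs_fs() in super.c);
 * btrfs_delayed_ref_init() unwinds via btrfs_delayed_ref_exit() if any
 * of the cache creations fail, which is why the destroy calls below are
 * guarded against NULL pointers.
 */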
void btrfs_delayed_ref_exit(void)
{
        if (btrfs_delayed_ref_head_cachep)
                kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        if (btrfs_delayed_tree_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        if (btrfs_delayed_data_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        if (btrfs_delayed_extent_op_cachep)
                kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                        "btrfs_delayed_ref_head",
                        sizeof(struct btrfs_delayed_ref_head), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                        "btrfs_delayed_tree_ref",
                        sizeof(struct btrfs_delayed_tree_ref), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                        "btrfs_delayed_data_ref",
                        sizeof(struct btrfs_delayed_data_ref), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                        "btrfs_delayed_extent_op",
                        sizeof(struct btrfs_delayed_extent_op), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}