transaction.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0
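
/*
 * Summary of how the table below is used (see join_transaction()): for each
 * transaction state it lists the handle types that may NOT join the running
 * transaction while it is in that state.  join_transaction() tests
 * btrfs_blocked_trans_types[cur_trans->state] & type and returns -EBUSY
 * when the requested type is blocked.
 */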
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};
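
/*
 * Drop a reference on @transaction; once the last reference is gone, verify
 * the transaction left no dangling state behind and free it.
 */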
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		if (transaction->delayed_refs.pending_csums)
			printk(KERN_ERR "pending csums is %llu\n",
			       transaction->delayed_refs.pending_csums);
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
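
/*
 * Empty a btree extent io tree: detach and free every extent_state left in
 * it, dropping and retaking the lock as needed so we don't hog the CPU.
 */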
static void clear_btree_io_tree(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}
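
/*
 * At commit time, point each root's commit_root at the root node that was
 * just written and drop the reference to the old commit root.
 */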
static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
		clear_btree_io_tree(&root->dirty_log_pages);
	}
	up_write(&fs_info->commit_root_sem);
}
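
/*
 * The "extwriters" counter tracks handles of the TRANS_EXTWRITERS types,
 * i.e. writers coming from outside the commit path itself; the commit code
 * (not shown in this excerpt) waits for this count to drain before it can
 * finalize the transaction.
 */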
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->have_free_bgs = 0;
	cur_trans->start_time = get_seconds();
	cur_trans->dirty_bg_run = 0;

	cur_trans->delayed_refs.href_root = RB_ROOT;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.pending_csums = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	cur_trans->delayed_refs.qgroup_to_skip = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->pending_ordered);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	mutex_init(&cur_trans->cache_write_mutex);
	cur_trans->num_dirty_bgs = 0;
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->deleted_bgs_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}
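
/*
 * Decide whether starting this transaction must also reserve space for
 * creating the root's relocation tree: only when relocation is running,
 * the root is a ref-counted (COW) root other than the reloc tree itself,
 * and no reloc root exists for it yet.
 */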
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->nodesize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->chunk_bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	h->reloc_reserved = false;
	h->sync = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);
	INIT_LIST_HEAD(&h->ordered);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}
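
/*
 * Illustrative usage sketch (not code from this file): a typical caller
 * reserves space for the number of items it will touch, checks the handle
 * with IS_ERR(), and always ends the handle when done:
 *
 *	trans = btrfs_start_transaction(root, 2);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify up to two tree items ...
 *	return btrfs_end_transaction(trans, root);
 */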
struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}
/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you want to be sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}
/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > root->fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}
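
/*
 * Heuristic for whether the running transaction should be ended: either the
 * global block reserve's space info is full while delayed refs still need
 * room, or btrfs_block_rsv_check() reports the global reserve past its
 * threshold (the factor 5 below).
 */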
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates * 2);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	unsigned long cur = trans->delayed_ref_updates;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;
	int must_run_delayed_refs = 0;

	if (trans->use_count > 1) {
		trans->use_count--;
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (!list_empty(&trans->ordered)) {
		spin_lock(&info->trans_lock);
		list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
		spin_unlock(&info->trans_lock);
	}

	trans->delayed_ref_updates = 0;
	if (!trans->sync) {
		must_run_delayed_refs =
			btrfs_should_throttle_delayed_refs(trans, root);
		cur = max_t(unsigned long, cur, 32);

		/*
		 * don't make the caller wait if they are from a NOLOCK
		 * or ATTACH transaction, it will deadlock with commit
		 */
		if (must_run_delayed_refs == 1 &&
		    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
			must_run_delayed_refs = 2;
	}

	if (trans->qgroup_reserved) {
		/*
		 * the same root has to be passed here between start_transaction
		 * and end_transaction. Subvolume quota depends on this.
		 */
		btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	btrfs_trans_release_chunk_metadata(trans);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root) &&
	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
		spin_lock(&info->trans_lock);
		if (cur_trans->state == TRANS_STATE_RUNNING)
			cur_trans->state = TRANS_STATE_BLOCKED;
		spin_unlock(&info->trans_lock);
	}

	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	if (must_run_delayed_refs) {
		btrfs_async_run_delayed_refs(root, cur,
					     must_run_delayed_refs == 1);
	}
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state, GFP_NOFS);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to btrfs_wait_marked_extents() would not know that writeback
		 * for this range started and therefore wouldn't wait for it to
		 * finish - we don't want to commit a superblock that points to
		 * btree nodes/leafs for which writeback hasn't finished yet
		 * (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through clear_btree_io_tree()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	bool errors = false;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through clear_btree_io_tree()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT,
				       0, 0, &cached_state, GFP_NOFS);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		if ((mark & EXTENT_DIRTY) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
				       &btree_ino->runtime_flags))
			errors = true;

		if ((mark & EXTENT_NEW) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	} else {
		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	}

	if (errors && !werr)
		werr = -EIO;

	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}
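
/*
 * Write out and wait on every btree block dirtied by this transaction, then
 * empty the transaction's dirty_pages io tree so nothing stale is left
 * behind for the next transaction.
 */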
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
{
	int ret;

	ret = btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
	clear_btree_io_tree(&trans->transaction->dirty_pages);

	return ret;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans, root);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * update all the fs-tree (subvolume) roots on disk: walk every root tagged
 * dirty in the radix tree and write out its updated root item
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that could affect the commit of the current
 * transaction, return that error number.  If an error only affects the
 * creation of this pending snapshot, record it in pending->error and
 * return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;
	/*
	 * Make qgroup skip the new snapshot's qgroupid, as it will be
	 * accounted for by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We checked the name at the beginning, so these errors are impossible. */
  1282. BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
  1283. if (ret) {
  1284. btrfs_abort_transaction(trans, root, ret);
  1285. goto fail;
  1286. }
  1287. btrfs_i_size_write(parent_inode, parent_inode->i_size +
  1288. dentry->d_name.len * 2);
  1289. parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
  1290. ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
  1291. if (ret) {
  1292. btrfs_abort_transaction(trans, root, ret);
  1293. goto fail;
  1294. }
  1295. ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
  1296. BTRFS_UUID_KEY_SUBVOL, objectid);
  1297. if (ret) {
  1298. btrfs_abort_transaction(trans, root, ret);
  1299. goto fail;
  1300. }
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * account qgroup counters before qgroup_inherit()
	 */
	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
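	/*
	 * All error paths above funnel through here; pending->error hands
	 * the result back to the caller driving the commit.
	 */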
fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}
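/*
 * Copy the block pointers, generations and levels of the tree root and
 * chunk root into the in-memory superblock, so the next superblock
 * write points at the trees committed by this transaction.
 */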
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};
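/*
 * Worker side of the async commit: runs with ownership of the handle
 * that btrfs_commit_transaction_async() created and handed over.
 */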
static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
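/*
 * Error-path teardown for a commit that cannot proceed: abort the
 * transaction, unhook it from the fs_info lists and drop both
 * remaining references along with the handle itself.
 */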
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction has already been removed from the list, it
	 * was committed successfully and it is a bug to reach the cleanup
	 * path for it.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans == root->fs_info->running_transaction)
		root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(root->fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
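/*
 * With -o flushoncommit, every commit starts writeback on all dirty
 * inodes (and later waits for the resulting ordered extents), so data
 * reaches disk together with the metadata that references it.
 */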
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		return btrfs_start_delalloc_roots(fs_info, 1, -1);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, -1);
}

static inline void
btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
			   struct btrfs_fs_info *fs_info)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&cur_trans->pending_ordered)) {
		ordered = list_first_entry(&cur_trans->pending_ordered,
					   struct btrfs_ordered_extent,
					   trans_list);
		list_del_init(&ordered->trans_list);
		spin_unlock(&fs_info->trans_lock);

		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
}
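/*
 * Rough shape of the commit below: flush delayed refs and dirty block
 * groups while other writers may still join; move the transaction to
 * COMMIT_START (new joins now block); flush delalloc, delayed items and
 * pending ordered extents; move to COMMIT_DOING and wait until we are
 * the only writer left; create pending snapshots and commit the fs and
 * COW-only roots; mark the transaction UNBLOCKED so new transactions
 * can start; finally write the tree and superblock and mark COMPLETED.
 */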
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	int ret;

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	if (!cur_trans->dirty_bg_run) {
		int run_it = 0;

		/* this mutex is also taken before trying to set
		 * block groups readonly.  We need to make sure
		 * that nobody has set a block group readonly
		 * after extents from that block group have been
		 * allocated for cache files.  btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it
		 * finds dirty_bg_run = 1
		 *
		 * The dirty_bg_run flag is also used to make sure only
		 * one process starts all the block group IO.  It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&root->fs_info->ro_block_group_mutex);
		if (!cur_trans->dirty_bg_run) {
			run_it = 1;
			cur_trans->dirty_bg_run = 1;
		}
		mutex_unlock(&root->fs_info->ro_block_group_mutex);

		if (run_it)
			ret = btrfs_start_dirty_block_groups(trans, root);
	}
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		if (unlikely(cur_trans->aborted))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);
			ret = prev_trans->aborted;

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
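	/*
	 * This handle no longer counts as an external writer; dropping
	 * the count lets the wait below reach zero once every other
	 * external writer has detached.
	 */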
	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending work might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_wait_pending_ordered(cur_trans, root->fs_info);

	btrfs_scrub_pause(root);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inodes
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/* Record old roots for later qgroup accounting */
	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);
	/* commit_fs_roots and commit_cowonly_roots below are responsible
	 * for getting the various roots consistent with each other.  Every
	 * pointer in the tree of tree roots has to point to the most up to
	 * date root for every subvolume and other tree.  So, we have to
	 * keep the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(root->fs_info);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	/*
	 * Since fs roots are all committed, we can get a quite accurate
	 * new_roots. So let's do quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
	if (ret < 0) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	btrfs_update_commit_device_size(root->fs_info);
	btrfs_update_commit_device_bytes_used(root, cur_trans);
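	/*
	 * The log1/log2 error bits track btree writeback errors for the
	 * log roots; a full commit obsoletes any existing log, so clear
	 * them before the next transaction starts logging.
	 */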
	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	if (cur_trans->have_free_bgs)
		btrfs_clear_space_info_full(root->fs_info);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);
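	/*
	 * Drop two references: the one this handle held on the
	 * transaction and the one that kept it on the trans_list.
	 */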
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread &&
	    current != root->fs_info->cleaner_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 there are more to be processed, call me again
 *
 * A return of 1 means there are certainly more snapshots to delete, but
 * if a new dead root arrives during processing we may still return 0.
 * We don't mind, because btrfs_commit_super will poke the cleaner thread
 * and it will process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
	    BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}
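/*
 * Mount option changes requested while a transaction was running are
 * parked in fs_info->pending_changes and applied here, at a transaction
 * boundary, so a running transaction never sees an option flip midway.
 */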
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}