transaction.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0
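
/*
 * For each transaction state, the mask of handle types that may no longer
 * join the transaction once it has reached that state; join_transaction()
 * returns -EBUSY for these.
 */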
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
                                           __TRANS_START),
        [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
        [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
};
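
/*
 * Drop a reference on a transaction.  When the last reference goes away,
 * free the pending chunk mappings and deleted block groups still attached
 * to it, then the transaction structure itself.
 */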
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(refcount_read(&transaction->use_count) == 0);
        if (refcount_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
                if (transaction->delayed_refs.pending_csums)
                        btrfs_err(transaction->fs_info,
                                  "pending csums is %llu",
                                  transaction->delayed_refs.pending_csums);
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;

                        em = list_first_entry(&transaction->pending_chunks,
                                              struct extent_map, list);
                        list_del_init(&em->list);
                        free_extent_map(em);
                }
                /*
                 * If any block groups are found in ->deleted_bgs then it's
                 * because the transaction was aborted and a commit did not
                 * happen (things failed before writing the new superblock
                 * and calling btrfs_finish_extent_commit()), so we can not
                 * discard the physical locations of the block groups.
                 */
                while (!list_empty(&transaction->deleted_bgs)) {
                        struct btrfs_block_group_cache *cache;

                        cache = list_first_entry(&transaction->deleted_bgs,
                                                 struct btrfs_block_group_cache,
                                                 bg_list);
                        list_del_init(&cache->bg_list);
                        btrfs_put_block_group_trimming(cache);
                        btrfs_put_block_group(cache);
                }
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}
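
/*
 * Free every extent_state left in a btree io tree.  Called once the
 * transaction or log commit is done with the tree, when nothing can be
 * waiting on the extent state flags anymore.
 */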
static void clear_btree_io_tree(struct extent_io_tree *tree)
{
        spin_lock(&tree->lock);
        /*
         * Do a single barrier for the waitqueue_active check here, the state
         * of the waitqueue should not change once clear_btree_io_tree is
         * called.
         */
        smp_mb();
        while (!RB_EMPTY_ROOT(&tree->state)) {
                struct rb_node *node;
                struct extent_state *state;

                node = rb_first(&tree->state);
                state = rb_entry(node, struct extent_state, rb_node);
                rb_erase(&state->rb_node, &tree->state);
                RB_CLEAR_NODE(&state->rb_node);
                /*
                 * btree io trees aren't supposed to have tasks waiting for
                 * changes in the flags of extent states ever.
                 */
                ASSERT(!waitqueue_active(&state->wq));
                free_extent_state(state);

                cond_resched_lock(&tree->lock);
        }
        spin_unlock(&tree->lock);
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->objectid))
                        btrfs_unpin_free_ino(root);
                clear_btree_io_tree(&root->dirty_log_pages);
        }

        /* We can free old roots now. */
        spin_lock(&trans->dropped_roots_lock);
        while (!list_empty(&trans->dropped_roots)) {
                root = list_first_entry(&trans->dropped_roots,
                                        struct btrfs_root, root_list);
                list_del_init(&root->root_list);
                spin_unlock(&trans->dropped_roots_lock);
                btrfs_drop_and_free_fs_root(fs_info, root);
                spin_lock(&trans->dropped_roots_lock);
        }
        spin_unlock(&trans->dropped_roots_lock);
        up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
                                     unsigned int type)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                refcount_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        cur_trans->fs_info = fs_info;
        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        init_waitqueue_head(&cur_trans->pending_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        refcount_set(&cur_trans->use_count, 2);
        atomic_set(&cur_trans->pending_ordered, 0);
        cur_trans->flags = 0;
        cur_trans->start_time = get_seconds();

        memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

        cur_trans->delayed_refs.href_root = RB_ROOT;
        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
        INIT_LIST_HEAD(&cur_trans->io_bgs);
        INIT_LIST_HEAD(&cur_trans->dropped_roots);
        mutex_init(&cur_trans->cache_write_mutex);
        cur_trans->num_dirty_bgs = 0;
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                            fs_info->btree_inode);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                int force)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
            root->last_trans < trans->transid) || force) {
                WARN_ON(root == fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for IN_TRANS_SETUP usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

                /* make sure readers find IN_TRANS_SETUP before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid && !force) {
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root IN_TRANS_SETUP.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_mb__before_atomic();
                clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
        }
        return 0;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;

        /* Add ourselves to the transaction dropped list */
        spin_lock(&cur_trans->dropped_roots_lock);
        list_add_tail(&root->root_list, &cur_trans->dropped_roots);
        spin_unlock(&cur_trans->dropped_roots_lock);

        /* Make sure we don't try to update the root at commit time */
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                             (unsigned long)root->root_key.objectid,
                             BTRFS_ROOT_TRANS_TAG);
        spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;

        /*
         * see record_root_in_trans for comments about IN_TRANS_SETUP usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
                return 0;

        mutex_lock(&fs_info->reloc_mutex);
        record_root_in_trans(trans, root, 0);
        mutex_unlock(&fs_info->reloc_mutex);

        return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_BLOCKED &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
        cur_trans = fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                refcount_inc(&cur_trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                wait_event(fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           cur_trans->aborted);
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!fs_info->reloc_ctl ||
            !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
                  unsigned int type, enum btrfs_reserve_flush_enum flush,
                  bool enforce_qgroups)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        int ret;

        /* Send isn't supposed to start transactions. */
        ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items && root != fs_info->chunk_root) {
                qgroup_reserved = num_items * fs_info->nodesize;
                ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved,
                                                enforce_qgroups);
                if (ret)
                        return ERR_PTR(ret);

                num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
                /*
                 * Do the reservation for the relocation root creation
                 */
                if (need_reserve_reloc_root(root)) {
                        num_bytes += fs_info->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(fs_info->sb);

        if (may_wait_transaction(fs_info, type))
                wait_current_trans(fs_info);

        do {
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
                        if (unlikely(type == TRANS_ATTACH))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0)
                goto join_fail;

        cur_trans = fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->root = root;
        h->use_count = 1;
        h->fs_info = root->fs_info;

        h->type = type;
        h->can_flush_pending_bgs = true;
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(fs_info, type)) {
                current->journal_info = h;
                btrfs_commit_transaction(h);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        btrfs_qgroup_free_meta(root, qgroup_reserved);
        return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        struct btrfs_root *root,
                                        unsigned int num_items,
                                        int min_factor)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        int ret;

        /*
         * We have two callers: unlink and block group removal.  The
         * former should succeed even if we will temporarily exceed
         * quota and the latter operates on the extent root so
         * qgroup enforcement is ignored anyway.
         */
        trans = start_transaction(root, num_items, TRANS_START,
                                  BTRFS_RESERVE_FLUSH_ALL, false);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans))
                return trans;

        num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
        ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
                                       num_bytes, min_factor);
        if (ret) {
                btrfs_end_transaction(trans);
                return ERR_PTR(ret);
        }

        trans->block_rsv = &fs_info->trans_block_rsv;
        trans->bytes_reserved = num_bytes;
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, num_bytes, 1);

        return trans;
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root,
                                        unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT, true);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
                                 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH,
                                  BTRFS_RESERVE_NO_FLUSH, true);
        if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
                btrfs_wait_for_commit(root->fs_info, 0);

        return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry(t, &fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);

                /*
                 * The specified transaction doesn't exist, or we
                 * raced with btrfs_commit_transaction
                 */
                if (!cur_trans) {
                        if (transid > fs_info->last_trans_committed)
                                ret = -EINVAL;
                        goto out;
                }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry_reverse(t, &fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
        if (!atomic_read(&fs_info->open_ioctl_trans))
                wait_current_trans(fs_info);
}

static int should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (fs_info->global_block_rsv.space_info->full &&
            btrfs_check_space_for_delayed_refs(trans, fs_info))
                return 1;

        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED ||
            cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, fs_info, updates * 2);
                if (err) /* Error code will also eval true */
                        return err;
        }

        return should_end_transaction(trans);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   int throttle)
{
        struct btrfs_fs_info *info = trans->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;
        u64 transid = trans->transid;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;
        int must_run_delayed_refs = 0;

        if (trans->use_count > 1) {
                trans->use_count--;
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans, info);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, info);

        trans->delayed_ref_updates = 0;
        if (!trans->sync) {
                must_run_delayed_refs =
                        btrfs_should_throttle_delayed_refs(trans, info);
                cur = max_t(unsigned long, cur, 32);

                /*
                 * don't make the caller wait if they are from a NOLOCK
                 * or ATTACH transaction, it will deadlock with commit
                 */
                if (must_run_delayed_refs == 1 &&
                    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
                        must_run_delayed_refs = 2;
        }

        btrfs_trans_release_metadata(trans, info);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, info);

        btrfs_trans_release_chunk_metadata(trans);

        if (lock && !atomic_read(&info->open_ioctl_trans) &&
            should_end_transaction(trans) &&
            READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
                spin_lock(&info->trans_lock);
                if (cur_trans->state == TRANS_STATE_RUNNING)
                        cur_trans->state = TRANS_STATE_BLOCKED;
                spin_unlock(&info->trans_lock);
        }

        if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans);
                else
                        wake_up_process(info->transaction_kthread);
        }

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        /*
         * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(info);

        if (trans->aborted ||
            test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
        }

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        if (must_run_delayed_refs) {
                btrfs_async_run_delayed_refs(info, cur, transid,
                                             must_run_delayed_refs == 1);
        }
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                bool wait_writeback = false;

                err = convert_extent_bit(dirty_pages, start, end,
                                         EXTENT_NEED_WAIT,
                                         mark, &cached_state);
                /*
                 * convert_extent_bit can return -ENOMEM, which is most of the
                 * time a temporary error.  So when it happens, ignore the error
                 * and wait for writeback of this range to finish - because we
                 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
                 * to __btrfs_wait_marked_extents() would not know that
                 * writeback for this range started and therefore wouldn't
                 * wait for it to finish - we don't want to commit a
                 * superblock that points to btree nodes/leafs for which
                 * writeback hasn't finished yet (and without errors).
                 * We cleanup any entries left in the io tree when committing
                 * the transaction (through clear_btree_io_tree()).
                 */
                if (err == -ENOMEM) {
                        err = 0;
                        wait_writeback = true;
                }
                if (!err)
                        err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                else if (wait_writeback)
                        werr = filemap_fdatawait_range(mapping, start, end);
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *dirty_pages)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                /*
                 * Ignore -ENOMEM errors returned by clear_extent_bit().
                 * When committing the transaction, we'll remove any entries
                 * left in the io tree.  For a log commit, we don't remove them
                 * after committing the log because the tree can be accessed
                 * concurrently - we do it only at transaction commit time when
                 * it's safe to do it (through clear_btree_io_tree()).
                 */
                err = clear_extent_bit(dirty_pages, start, end,
                                       EXTENT_NEED_WAIT,
                                       0, 0, &cached_state, GFP_NOFS);
                if (err == -ENOMEM)
                        err = 0;
                if (!err)
                        err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
                       struct extent_io_tree *dirty_pages)
{
        bool errors = false;
        int err;

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
        struct btrfs_fs_info *fs_info = log_root->fs_info;
        struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
        bool errors = false;
        int err;

        ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if ((mark & EXTENT_DIRTY) &&
            test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
                errors = true;

        if ((mark & EXTENT_NEW) &&
            test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;
        struct blk_plug plug;

        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(fs_info, dirty_pages, mark);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_extents(fs_info, dirty_pages);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                            struct btrfs_fs_info *fs_info)
{
        int ret;

        ret = btrfs_write_and_wait_marked_extents(fs_info,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
        clear_btree_io_tree(&trans->transaction->dirty_pages);

        return ret;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *tree_root = fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
        struct list_head *io_bgs = &trans->transaction->io_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans, fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans, fs_info);
        if (ret)
                return ret;

        ret = btrfs_setup_space_cache(trans, fs_info);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
        if (ret)
                return ret;
again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;

                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                clear_bit(BTRFS_ROOT_DIRTY, &root->state);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans, fs_info);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        if (!list_empty(&fs_info->dirty_cowonly_roots))
                goto again;

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);
        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots that need to be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list))
                list_add_tail(&root->root_list, &fs_info->dead_roots);
        spin_unlock(&fs_info->trans_lock);
}

/*
 * update all the fs tree roots on disk, i.e. every root tagged with
 * BTRFS_ROOT_TRANS_TAG in the fs_roots radix tree
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *gang[8];
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];

                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                        btrfs_qgroup_free_meta_all(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(info);
                cond_resched();

                if (btrfs_fs_closing(info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(info)) {
                        btrfs_debug(info, "defrag_root cancelled");
                        ret = -EAGAIN;
                        break;
                }
        }
        clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
        return ret;
}

/*
 * Do all the special snapshot-related qgroup work.
 *
 * Does the needed qgroup inheritance and tricks like switching commit
 * roots inside one transaction and writing all btrees to disk, so that
 * qgroup accounting works.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *src,
                                   struct btrfs_root *parent,
                                   struct btrfs_qgroup_inherit *inherit,
                                   u64 dst_objectid)
{
        struct btrfs_fs_info *fs_info = src->fs_info;
        int ret;

        /*
         * Save some performance in the case that qgroups are not
         * enabled. If this check races with the ioctl, rescan will
         * kick in anyway.
         */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        /*
         * We are going to commit the transaction, see the comment in
         * btrfs_commit_transaction() for why we lock tree_log_mutex.
         */
        mutex_lock(&fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, fs_info);
        if (ret)
                goto out;
        ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
        if (ret < 0)
                goto out;
        ret = btrfs_qgroup_account_extents(trans, fs_info);
        if (ret < 0)
                goto out;

        /* Now the qgroups are all updated, we can inherit them into the new ones */
        ret = btrfs_qgroup_inherit(trans, fs_info,
                                   src->root_key.objectid, dst_objectid,
                                   inherit);
        if (ret < 0)
                goto out;

        /*
         * Now we do a simplified commit transaction, which will:
         * 1) commit all subvolume and extent trees
         *    To ensure all subvolume and extent trees have a valid
         *    commit_root so the later insert_dir_item() is accounted correctly
         * 2) write all btree blocks onto disk
         *    This is to make sure later btree modifications will be cowed,
         *    otherwise a commit_root could be populated and cause wrong
         *    qgroup numbers.
         * In this simplified commit, we don't really care about other trees
         * like chunk and root tree, as they won't affect qgroup.
         * And we don't write super to avoid a half committed status.
         */
        ret = commit_cowonly_roots(trans, fs_info);
        if (ret)
                goto out;
        switch_commit_roots(trans->transaction, fs_info);
        ret = btrfs_write_and_wait_transaction(trans, fs_info);
        if (ret)
                btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");

out:
        mutex_unlock(&fs_info->tree_log_mutex);

        /*
         * Force the parent root to be updated, as we recorded it before so
         * its last_trans == cur_transid.  Otherwise it won't be committed
         * again onto disk after the later insert_dir_item().
         */
        if (!ret)
                record_root_in_trans(trans, parent, 1);
        return ret;
}
  1239. /*
  1240. * new snapshots need to be created at a very specific time in the
  1241. * transaction commit. This does the actual creation.
  1242. *
  1243. * Note:
  1244. * If the error which may affect the commitment of the current transaction
  1245. * happens, we should return the error number. If the error which just affect
  1246. * the creation of the pending snapshots, just return 0.
  1247. */
  1248. static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
  1249. struct btrfs_fs_info *fs_info,
  1250. struct btrfs_pending_snapshot *pending)
  1251. {
  1252. struct btrfs_key key;
  1253. struct btrfs_root_item *new_root_item;
  1254. struct btrfs_root *tree_root = fs_info->tree_root;
  1255. struct btrfs_root *root = pending->root;
  1256. struct btrfs_root *parent_root;
  1257. struct btrfs_block_rsv *rsv;
  1258. struct inode *parent_inode;
  1259. struct btrfs_path *path;
  1260. struct btrfs_dir_item *dir_item;
  1261. struct dentry *dentry;
  1262. struct extent_buffer *tmp;
  1263. struct extent_buffer *old;
  1264. struct timespec cur_time;
  1265. int ret = 0;
  1266. u64 to_reserve = 0;
  1267. u64 index = 0;
  1268. u64 objectid;
  1269. u64 root_flags;
  1270. uuid_le new_uuid;
  1271. ASSERT(pending->path);
  1272. path = pending->path;
  1273. ASSERT(pending->root_item);
  1274. new_root_item = pending->root_item;
  1275. pending->error = btrfs_find_free_objectid(tree_root, &objectid);
  1276. if (pending->error)
  1277. goto no_free_objectid;
1278. /*
1279. * Make the qgroup code skip the new snapshot's qgroupid, as it will be
1280. * accounted for by the later btrfs_qgroup_inherit().
1281. */
  1282. btrfs_set_skip_qgroup(trans, objectid);
  1283. btrfs_reloc_pre_snapshot(pending, &to_reserve);
  1284. if (to_reserve > 0) {
  1285. pending->error = btrfs_block_rsv_add(root,
  1286. &pending->block_rsv,
  1287. to_reserve,
  1288. BTRFS_RESERVE_NO_FLUSH);
  1289. if (pending->error)
  1290. goto clear_skip_qgroup;
  1291. }
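/*
 * Key for the new snapshot's root item: key.offset is set to the
 * creating transid just before btrfs_insert_root() below, and back to
 * (u64)-1 when looking up the freshly created root afterwards.
 */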
  1292. key.objectid = objectid;
  1293. key.offset = (u64)-1;
  1294. key.type = BTRFS_ROOT_ITEM_KEY;
  1295. rsv = trans->block_rsv;
  1296. trans->block_rsv = &pending->block_rsv;
  1297. trans->bytes_reserved = trans->block_rsv->reserved;
  1298. trace_btrfs_space_reservation(fs_info, "transaction",
  1299. trans->transid,
  1300. trans->bytes_reserved, 1);
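/*
 * From here on the handle charges metadata allocations against the
 * snapshot's pre-reserved block_rsv; the original rsv is restored at
 * the dir_item_existed label below.
 */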
  1301. dentry = pending->dentry;
  1302. parent_inode = pending->dir;
  1303. parent_root = BTRFS_I(parent_inode)->root;
  1304. record_root_in_trans(trans, parent_root, 0);
  1305. cur_time = current_time(parent_inode);
  1306. /*
  1307. * insert the directory item
  1308. */
  1309. ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
  1310. BUG_ON(ret); /* -ENOMEM */
  1311. /* check if there is a file/dir which has the same name. */
  1312. dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
  1313. btrfs_ino(BTRFS_I(parent_inode)),
  1314. dentry->d_name.name,
  1315. dentry->d_name.len, 0);
  1316. if (dir_item != NULL && !IS_ERR(dir_item)) {
  1317. pending->error = -EEXIST;
  1318. goto dir_item_existed;
  1319. } else if (IS_ERR(dir_item)) {
  1320. ret = PTR_ERR(dir_item);
  1321. btrfs_abort_transaction(trans, ret);
  1322. goto fail;
  1323. }
  1324. btrfs_release_path(path);
1325. /*
1326. * pull in the delayed directory update
1327. * and the delayed inode item,
1328. * otherwise we would corrupt the FS during
1329. * the snapshot
1330. */
  1331. ret = btrfs_run_delayed_items(trans, fs_info);
  1332. if (ret) { /* Transaction aborted */
  1333. btrfs_abort_transaction(trans, ret);
  1334. goto fail;
  1335. }
  1336. record_root_in_trans(trans, root, 0);
  1337. btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
  1338. memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
  1339. btrfs_check_and_init_root_item(new_root_item);
  1340. root_flags = btrfs_root_flags(new_root_item);
  1341. if (pending->readonly)
  1342. root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
  1343. else
  1344. root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
  1345. btrfs_set_root_flags(new_root_item, root_flags);
  1346. btrfs_set_root_generation_v2(new_root_item,
  1347. trans->transid);
  1348. uuid_le_gen(&new_uuid);
  1349. memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
  1350. memcpy(new_root_item->parent_uuid, root->root_item.uuid,
  1351. BTRFS_UUID_SIZE);
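/*
 * A writable snapshot is no longer the subvolume that was received, so
 * clear the receive-related fields; read-only snapshots keep them.
 */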
  1352. if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
  1353. memset(new_root_item->received_uuid, 0,
  1354. sizeof(new_root_item->received_uuid));
  1355. memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
  1356. memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
  1357. btrfs_set_root_stransid(new_root_item, 0);
  1358. btrfs_set_root_rtransid(new_root_item, 0);
  1359. }
  1360. btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
  1361. btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
  1362. btrfs_set_root_otransid(new_root_item, trans->transid);
  1363. old = btrfs_lock_root_node(root);
  1364. ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
  1365. if (ret) {
  1366. btrfs_tree_unlock(old);
  1367. free_extent_buffer(old);
  1368. btrfs_abort_transaction(trans, ret);
  1369. goto fail;
  1370. }
  1371. btrfs_set_lock_blocking(old);
  1372. ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
  1373. /* clean up in any case */
  1374. btrfs_tree_unlock(old);
  1375. free_extent_buffer(old);
  1376. if (ret) {
  1377. btrfs_abort_transaction(trans, ret);
  1378. goto fail;
  1379. }
  1380. /* see comments in should_cow_block() */
  1381. set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
  1382. smp_wmb();
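/*
 * Ensure the FORCE_COW bit is visible before we continue: blocks that
 * were already COWed in this transaction are now shared with the
 * snapshot and must be COWed again on the next modification.
 */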
  1383. btrfs_set_root_node(new_root_item, tmp);
  1384. /* record when the snapshot was created in key.offset */
  1385. key.offset = trans->transid;
  1386. ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
  1387. btrfs_tree_unlock(tmp);
  1388. free_extent_buffer(tmp);
  1389. if (ret) {
  1390. btrfs_abort_transaction(trans, ret);
  1391. goto fail;
  1392. }
  1393. /*
  1394. * insert root back/forward references
  1395. */
  1396. ret = btrfs_add_root_ref(trans, fs_info, objectid,
  1397. parent_root->root_key.objectid,
  1398. btrfs_ino(BTRFS_I(parent_inode)), index,
  1399. dentry->d_name.name, dentry->d_name.len);
  1400. if (ret) {
  1401. btrfs_abort_transaction(trans, ret);
  1402. goto fail;
  1403. }
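/*
 * Look up the in-memory root for the snapshot we just inserted;
 * offset (u64)-1 means "the most recent root item for this objectid".
 */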
  1404. key.offset = (u64)-1;
  1405. pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
  1406. if (IS_ERR(pending->snap)) {
  1407. ret = PTR_ERR(pending->snap);
  1408. btrfs_abort_transaction(trans, ret);
  1409. goto fail;
  1410. }
  1411. ret = btrfs_reloc_post_snapshot(trans, pending);
  1412. if (ret) {
  1413. btrfs_abort_transaction(trans, ret);
  1414. goto fail;
  1415. }
  1416. ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
  1417. if (ret) {
  1418. btrfs_abort_transaction(trans, ret);
  1419. goto fail;
  1420. }
1421. /*
1422. * Do special qgroup accounting for the snapshot: we use a qgroup
1423. * hack to make snapshot creation fast, and to cooperate with that
1424. * hack we must do the accounting here as well. Otherwise the
1425. * snapshot would be greatly slowed down by a subtree qgroup rescan.
1426. */
  1427. ret = qgroup_account_snapshot(trans, root, parent_root,
  1428. pending->inherit, objectid);
  1429. if (ret < 0)
  1430. goto fail;
  1431. ret = btrfs_insert_dir_item(trans, parent_root,
  1432. dentry->d_name.name, dentry->d_name.len,
  1433. BTRFS_I(parent_inode), &key,
  1434. BTRFS_FT_DIR, index);
1435. /* We checked that the name does not exist at the beginning, so these errors are impossible. */
  1436. BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
  1437. if (ret) {
  1438. btrfs_abort_transaction(trans, ret);
  1439. goto fail;
  1440. }
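/*
 * Directory i_size in btrfs counts each name twice: once for the dir
 * item and once for the dir index item.
 */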
  1441. btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
  1442. dentry->d_name.len * 2);
  1443. parent_inode->i_mtime = parent_inode->i_ctime =
  1444. current_time(parent_inode);
  1445. ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
  1446. if (ret) {
  1447. btrfs_abort_transaction(trans, ret);
  1448. goto fail;
  1449. }
  1450. ret = btrfs_uuid_tree_add(trans, fs_info, new_uuid.b,
  1451. BTRFS_UUID_KEY_SUBVOL, objectid);
  1452. if (ret) {
  1453. btrfs_abort_transaction(trans, ret);
  1454. goto fail;
  1455. }
  1456. if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
  1457. ret = btrfs_uuid_tree_add(trans, fs_info,
  1458. new_root_item->received_uuid,
  1459. BTRFS_UUID_KEY_RECEIVED_SUBVOL,
  1460. objectid);
  1461. if (ret && ret != -EEXIST) {
  1462. btrfs_abort_transaction(trans, ret);
  1463. goto fail;
  1464. }
  1465. }
  1466. ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
  1467. if (ret) {
  1468. btrfs_abort_transaction(trans, ret);
  1469. goto fail;
  1470. }
  1471. fail:
  1472. pending->error = ret;
  1473. dir_item_existed:
  1474. trans->block_rsv = rsv;
  1475. trans->bytes_reserved = 0;
  1476. clear_skip_qgroup:
  1477. btrfs_clear_skip_qgroup(trans);
  1478. no_free_objectid:
  1479. kfree(new_root_item);
  1480. pending->root_item = NULL;
  1481. btrfs_free_path(path);
  1482. pending->path = NULL;
  1483. return ret;
  1484. }
  1485. /*
  1486. * create all the snapshots we've scheduled for creation
  1487. */
  1488. static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
  1489. struct btrfs_fs_info *fs_info)
  1490. {
  1491. struct btrfs_pending_snapshot *pending, *next;
  1492. struct list_head *head = &trans->transaction->pending_snapshots;
  1493. int ret = 0;
  1494. list_for_each_entry_safe(pending, next, head, list) {
  1495. list_del(&pending->list);
  1496. ret = create_pending_snapshot(trans, fs_info, pending);
  1497. if (ret)
  1498. break;
  1499. }
  1500. return ret;
  1501. }
  1502. static void update_super_roots(struct btrfs_fs_info *fs_info)
  1503. {
  1504. struct btrfs_root_item *root_item;
  1505. struct btrfs_super_block *super;
  1506. super = fs_info->super_copy;
  1507. root_item = &fs_info->chunk_root->root_item;
  1508. super->chunk_root = root_item->bytenr;
  1509. super->chunk_root_generation = root_item->generation;
  1510. super->chunk_root_level = root_item->level;
  1511. root_item = &fs_info->tree_root->root_item;
  1512. super->root = root_item->bytenr;
  1513. super->generation = root_item->generation;
  1514. super->root_level = root_item->level;
  1515. if (btrfs_test_opt(fs_info, SPACE_CACHE))
  1516. super->cache_generation = root_item->generation;
  1517. if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
  1518. super->uuid_tree_generation = root_item->generation;
  1519. }
  1520. int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
  1521. {
  1522. struct btrfs_transaction *trans;
  1523. int ret = 0;
  1524. spin_lock(&info->trans_lock);
  1525. trans = info->running_transaction;
  1526. if (trans)
  1527. ret = (trans->state >= TRANS_STATE_COMMIT_START);
  1528. spin_unlock(&info->trans_lock);
  1529. return ret;
  1530. }
  1531. int btrfs_transaction_blocked(struct btrfs_fs_info *info)
  1532. {
  1533. struct btrfs_transaction *trans;
  1534. int ret = 0;
  1535. spin_lock(&info->trans_lock);
  1536. trans = info->running_transaction;
  1537. if (trans)
  1538. ret = is_transaction_blocked(trans);
  1539. spin_unlock(&info->trans_lock);
  1540. return ret;
  1541. }
  1542. /*
  1543. * wait for the current transaction commit to start and block subsequent
  1544. * transaction joins
  1545. */
  1546. static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
  1547. struct btrfs_transaction *trans)
  1548. {
  1549. wait_event(fs_info->transaction_blocked_wait,
  1550. trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
  1551. }
  1552. /*
  1553. * wait for the current transaction to start and then become unblocked.
  1554. * caller holds ref.
  1555. */
  1556. static void wait_current_trans_commit_start_and_unblock(
  1557. struct btrfs_fs_info *fs_info,
  1558. struct btrfs_transaction *trans)
  1559. {
  1560. wait_event(fs_info->transaction_wait,
  1561. trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
  1562. }
  1563. /*
  1564. * commit transactions asynchronously. once btrfs_commit_transaction_async
  1565. * returns, any subsequent transaction will not be allowed to join.
  1566. */
  1567. struct btrfs_async_commit {
  1568. struct btrfs_trans_handle *newtrans;
  1569. struct work_struct work;
  1570. };
  1571. static void do_async_commit(struct work_struct *work)
  1572. {
  1573. struct btrfs_async_commit *ac =
  1574. container_of(work, struct btrfs_async_commit, work);
  1575. /*
  1576. * We've got freeze protection passed with the transaction.
  1577. * Tell lockdep about it.
  1578. */
  1579. if (ac->newtrans->type & __TRANS_FREEZABLE)
  1580. __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
  1581. current->journal_info = ac->newtrans;
  1582. btrfs_commit_transaction(ac->newtrans);
  1583. kfree(ac);
  1584. }
  1585. int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
  1586. int wait_for_unblock)
  1587. {
  1588. struct btrfs_fs_info *fs_info = trans->fs_info;
  1589. struct btrfs_async_commit *ac;
  1590. struct btrfs_transaction *cur_trans;
  1591. ac = kmalloc(sizeof(*ac), GFP_NOFS);
  1592. if (!ac)
  1593. return -ENOMEM;
  1594. INIT_WORK(&ac->work, do_async_commit);
  1595. ac->newtrans = btrfs_join_transaction(trans->root);
  1596. if (IS_ERR(ac->newtrans)) {
  1597. int err = PTR_ERR(ac->newtrans);
  1598. kfree(ac);
  1599. return err;
  1600. }
  1601. /* take transaction reference */
  1602. cur_trans = trans->transaction;
  1603. refcount_inc(&cur_trans->use_count);
  1604. btrfs_end_transaction(trans);
  1605. /*
  1606. * Tell lockdep we've released the freeze rwsem, since the
  1607. * async commit thread will be the one to unlock it.
  1608. */
  1609. if (ac->newtrans->type & __TRANS_FREEZABLE)
  1610. __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
  1611. schedule_work(&ac->work);
  1612. /* wait for transaction to start and unblock */
  1613. if (wait_for_unblock)
  1614. wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
  1615. else
  1616. wait_current_trans_commit_start(fs_info, cur_trans);
  1617. if (current->journal_info == trans)
  1618. current->journal_info = NULL;
  1619. btrfs_put_transaction(cur_trans);
  1620. return 0;
  1621. }
  1622. static void cleanup_transaction(struct btrfs_trans_handle *trans,
  1623. struct btrfs_root *root, int err)
  1624. {
  1625. struct btrfs_fs_info *fs_info = root->fs_info;
  1626. struct btrfs_transaction *cur_trans = trans->transaction;
  1627. DEFINE_WAIT(wait);
  1628. WARN_ON(trans->use_count > 1);
  1629. btrfs_abort_transaction(trans, err);
  1630. spin_lock(&fs_info->trans_lock);
  1631. /*
  1632. * If the transaction is removed from the list, it means this
  1633. * transaction has been committed successfully, so it is impossible
  1634. * to call the cleanup function.
  1635. */
  1636. BUG_ON(list_empty(&cur_trans->list));
  1637. list_del_init(&cur_trans->list);
  1638. if (cur_trans == fs_info->running_transaction) {
  1639. cur_trans->state = TRANS_STATE_COMMIT_DOING;
  1640. spin_unlock(&fs_info->trans_lock);
  1641. wait_event(cur_trans->writer_wait,
  1642. atomic_read(&cur_trans->num_writers) == 1);
  1643. spin_lock(&fs_info->trans_lock);
  1644. }
  1645. spin_unlock(&fs_info->trans_lock);
  1646. btrfs_cleanup_one_transaction(trans->transaction, fs_info);
  1647. spin_lock(&fs_info->trans_lock);
  1648. if (cur_trans == fs_info->running_transaction)
  1649. fs_info->running_transaction = NULL;
  1650. spin_unlock(&fs_info->trans_lock);
  1651. if (trans->type & __TRANS_FREEZABLE)
  1652. sb_end_intwrite(fs_info->sb);
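/*
 * Drop two references: the one held for the fs_info transaction list
 * (the transaction was unlinked above) and the one held by this handle.
 */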
  1653. btrfs_put_transaction(cur_trans);
  1654. btrfs_put_transaction(cur_trans);
  1655. trace_btrfs_transaction_commit(root);
  1656. if (current->journal_info == trans)
  1657. current->journal_info = NULL;
  1658. btrfs_scrub_cancel(fs_info);
  1659. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  1660. }
  1661. static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
  1662. {
  1663. if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
  1664. return btrfs_start_delalloc_roots(fs_info, 1, -1);
  1665. return 0;
  1666. }
  1667. static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
  1668. {
  1669. if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
  1670. btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
  1671. }
  1672. static inline void
  1673. btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
  1674. {
  1675. wait_event(cur_trans->pending_wait,
  1676. atomic_read(&cur_trans->pending_ordered) == 0);
  1677. }
  1678. int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
  1679. {
  1680. struct btrfs_fs_info *fs_info = trans->fs_info;
  1681. struct btrfs_transaction *cur_trans = trans->transaction;
  1682. struct btrfs_transaction *prev_trans = NULL;
  1683. int ret;
  1684. /* Stop the commit early if ->aborted is set */
  1685. if (unlikely(READ_ONCE(cur_trans->aborted))) {
  1686. ret = cur_trans->aborted;
  1687. btrfs_end_transaction(trans);
  1688. return ret;
  1689. }
1690. /* make a pass through all the delayed refs we have so far;
1691. * any running procs may add more while we are here
1692. */
  1693. ret = btrfs_run_delayed_refs(trans, fs_info, 0);
  1694. if (ret) {
  1695. btrfs_end_transaction(trans);
  1696. return ret;
  1697. }
  1698. btrfs_trans_release_metadata(trans, fs_info);
  1699. trans->block_rsv = NULL;
  1700. cur_trans = trans->transaction;
  1701. /*
  1702. * set the flushing flag so procs in this transaction have to
  1703. * start sending their work down.
  1704. */
  1705. cur_trans->delayed_refs.flushing = 1;
  1706. smp_wmb();
  1707. if (!list_empty(&trans->new_bgs))
  1708. btrfs_create_pending_block_groups(trans, fs_info);
  1709. ret = btrfs_run_delayed_refs(trans, fs_info, 0);
  1710. if (ret) {
  1711. btrfs_end_transaction(trans);
  1712. return ret;
  1713. }
  1714. if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
  1715. int run_it = 0;
  1716. /* this mutex is also taken before trying to set
  1717. * block groups readonly. We need to make sure
  1718. * that nobody has set a block group readonly
1719. * after extents from that block group have been
  1720. * allocated for cache files. btrfs_set_block_group_ro
  1721. * will wait for the transaction to commit if it
  1722. * finds BTRFS_TRANS_DIRTY_BG_RUN set.
  1723. *
  1724. * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
  1725. * only one process starts all the block group IO. It wouldn't
  1726. * hurt to have more than one go through, but there's no
  1727. * real advantage to it either.
  1728. */
  1729. mutex_lock(&fs_info->ro_block_group_mutex);
  1730. if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
  1731. &cur_trans->flags))
  1732. run_it = 1;
  1733. mutex_unlock(&fs_info->ro_block_group_mutex);
  1734. if (run_it)
  1735. ret = btrfs_start_dirty_block_groups(trans, fs_info);
  1736. }
  1737. if (ret) {
  1738. btrfs_end_transaction(trans);
  1739. return ret;
  1740. }
  1741. spin_lock(&fs_info->trans_lock);
  1742. if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
  1743. spin_unlock(&fs_info->trans_lock);
  1744. refcount_inc(&cur_trans->use_count);
  1745. ret = btrfs_end_transaction(trans);
  1746. wait_for_commit(cur_trans);
  1747. if (unlikely(cur_trans->aborted))
  1748. ret = cur_trans->aborted;
  1749. btrfs_put_transaction(cur_trans);
  1750. return ret;
  1751. }
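/*
 * We are the commit leader from here on. Moving to COMMIT_START stops
 * new TRANS_START handles from being created and wakes anyone waiting
 * for the commit to begin.
 */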
  1752. cur_trans->state = TRANS_STATE_COMMIT_START;
  1753. wake_up(&fs_info->transaction_blocked_wait);
  1754. if (cur_trans->list.prev != &fs_info->trans_list) {
  1755. prev_trans = list_entry(cur_trans->list.prev,
  1756. struct btrfs_transaction, list);
  1757. if (prev_trans->state != TRANS_STATE_COMPLETED) {
  1758. refcount_inc(&prev_trans->use_count);
  1759. spin_unlock(&fs_info->trans_lock);
  1760. wait_for_commit(prev_trans);
  1761. ret = prev_trans->aborted;
  1762. btrfs_put_transaction(prev_trans);
  1763. if (ret)
  1764. goto cleanup_transaction;
  1765. } else {
  1766. spin_unlock(&fs_info->trans_lock);
  1767. }
  1768. } else {
  1769. spin_unlock(&fs_info->trans_lock);
  1770. }
  1771. extwriter_counter_dec(cur_trans, trans->type);
  1772. ret = btrfs_start_delalloc_flush(fs_info);
  1773. if (ret)
  1774. goto cleanup_transaction;
  1775. ret = btrfs_run_delayed_items(trans, fs_info);
  1776. if (ret)
  1777. goto cleanup_transaction;
  1778. wait_event(cur_trans->writer_wait,
  1779. extwriter_counter_read(cur_trans) == 0);
1780. /* some pending stuff might be added after the previous flush. */
  1781. ret = btrfs_run_delayed_items(trans, fs_info);
  1782. if (ret)
  1783. goto cleanup_transaction;
  1784. btrfs_wait_delalloc_flush(fs_info);
  1785. btrfs_wait_pending_ordered(cur_trans);
  1786. btrfs_scrub_pause(fs_info);
  1787. /*
  1788. * Ok now we need to make sure to block out any other joins while we
  1789. * commit the transaction. We could have started a join before setting
  1790. * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
  1791. */
  1792. spin_lock(&fs_info->trans_lock);
  1793. cur_trans->state = TRANS_STATE_COMMIT_DOING;
  1794. spin_unlock(&fs_info->trans_lock);
  1795. wait_event(cur_trans->writer_wait,
  1796. atomic_read(&cur_trans->num_writers) == 1);
  1797. /* ->aborted might be set after the previous check, so check it */
  1798. if (unlikely(READ_ONCE(cur_trans->aborted))) {
  1799. ret = cur_trans->aborted;
  1800. goto scrub_continue;
  1801. }
  1802. /*
  1803. * the reloc mutex makes sure that we stop
  1804. * the balancing code from coming in and moving
  1805. * extents around in the middle of the commit
  1806. */
  1807. mutex_lock(&fs_info->reloc_mutex);
  1808. /*
  1809. * We needn't worry about the delayed items because we will
  1810. * deal with them in create_pending_snapshot(), which is the
  1811. * core function of the snapshot creation.
  1812. */
  1813. ret = create_pending_snapshots(trans, fs_info);
  1814. if (ret) {
  1815. mutex_unlock(&fs_info->reloc_mutex);
  1816. goto scrub_continue;
  1817. }
  1818. /*
  1819. * We insert the dir indexes of the snapshots and update the inode
  1820. * of the snapshots' parents after the snapshot creation, so there
  1821. * are some delayed items which are not dealt with. Now deal with
  1822. * them.
  1823. *
  1824. * We needn't worry that this operation will corrupt the snapshots,
1825. * because all the trees that are snapshotted will be forced to COW
  1826. * the nodes and leaves.
  1827. */
  1828. ret = btrfs_run_delayed_items(trans, fs_info);
  1829. if (ret) {
  1830. mutex_unlock(&fs_info->reloc_mutex);
  1831. goto scrub_continue;
  1832. }
  1833. ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
  1834. if (ret) {
  1835. mutex_unlock(&fs_info->reloc_mutex);
  1836. goto scrub_continue;
  1837. }
  1838. /*
  1839. * make sure none of the code above managed to slip in a
  1840. * delayed item
  1841. */
  1842. btrfs_assert_delayed_root_empty(fs_info);
  1843. WARN_ON(cur_trans != trans->transaction);
1844. /* commit_fs_roots() and commit_cowonly_roots() below are responsible
1845. * for getting the various roots consistent with each other. Every pointer
  1846. * in the tree of tree roots has to point to the most up to date
  1847. * root for every subvolume and other tree. So, we have to keep
  1848. * the tree logging code from jumping in and changing any
  1849. * of the trees.
  1850. *
  1851. * At this point in the commit, there can't be any tree-log
  1852. * writers, but a little lower down we drop the trans mutex
  1853. * and let new people in. By holding the tree_log_mutex
  1854. * from now until after the super is written, we avoid races
  1855. * with the tree-log code.
  1856. */
  1857. mutex_lock(&fs_info->tree_log_mutex);
  1858. ret = commit_fs_roots(trans, fs_info);
  1859. if (ret) {
  1860. mutex_unlock(&fs_info->tree_log_mutex);
  1861. mutex_unlock(&fs_info->reloc_mutex);
  1862. goto scrub_continue;
  1863. }
  1864. /*
  1865. * Since the transaction is done, we can apply the pending changes
  1866. * before the next transaction.
  1867. */
  1868. btrfs_apply_pending_changes(fs_info);
1869. /* commit_fs_roots() gets rid of all the tree log roots; it is now
1870. * safe to free the log root tree
  1871. */
  1872. btrfs_free_log_root_tree(trans, fs_info);
  1873. /*
  1874. * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
  1875. * new delayed refs. Must handle them or qgroup can be wrong.
1876. * new delayed refs. We must handle them or qgroup numbers will be wrong.
  1877. ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
  1878. if (ret) {
  1879. mutex_unlock(&fs_info->tree_log_mutex);
  1880. mutex_unlock(&fs_info->reloc_mutex);
  1881. goto scrub_continue;
  1882. }
  1883. ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
  1884. if (ret) {
  1885. mutex_unlock(&fs_info->tree_log_mutex);
  1886. mutex_unlock(&fs_info->reloc_mutex);
  1887. goto scrub_continue;
  1888. }
  1889. /*
  1890. * Since fs roots are all committed, we can get a quite accurate
  1891. * new_roots. So let's do quota accounting.
  1892. */
  1893. ret = btrfs_qgroup_account_extents(trans, fs_info);
  1894. if (ret < 0) {
  1895. mutex_unlock(&fs_info->tree_log_mutex);
  1896. mutex_unlock(&fs_info->reloc_mutex);
  1897. goto scrub_continue;
  1898. }
  1899. ret = commit_cowonly_roots(trans, fs_info);
  1900. if (ret) {
  1901. mutex_unlock(&fs_info->tree_log_mutex);
  1902. mutex_unlock(&fs_info->reloc_mutex);
  1903. goto scrub_continue;
  1904. }
  1905. /*
  1906. * The tasks which save the space cache and inode cache may also
  1907. * update ->aborted, check it.
  1908. */
  1909. if (unlikely(READ_ONCE(cur_trans->aborted))) {
  1910. ret = cur_trans->aborted;
  1911. mutex_unlock(&fs_info->tree_log_mutex);
  1912. mutex_unlock(&fs_info->reloc_mutex);
  1913. goto scrub_continue;
  1914. }
  1915. btrfs_prepare_extent_commit(fs_info);
  1916. cur_trans = fs_info->running_transaction;
  1917. btrfs_set_root_node(&fs_info->tree_root->root_item,
  1918. fs_info->tree_root->node);
  1919. list_add_tail(&fs_info->tree_root->dirty_list,
  1920. &cur_trans->switch_commits);
  1921. btrfs_set_root_node(&fs_info->chunk_root->root_item,
  1922. fs_info->chunk_root->node);
  1923. list_add_tail(&fs_info->chunk_root->dirty_list,
  1924. &cur_trans->switch_commits);
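/*
 * The tree root and chunk root are now final for this transaction;
 * switch_commit_roots() makes the new nodes the commit roots that
 * readers outside a transaction will see.
 */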
  1925. switch_commit_roots(cur_trans, fs_info);
  1926. ASSERT(list_empty(&cur_trans->dirty_bgs));
  1927. ASSERT(list_empty(&cur_trans->io_bgs));
  1928. update_super_roots(fs_info);
  1929. btrfs_set_super_log_root(fs_info->super_copy, 0);
  1930. btrfs_set_super_log_root_level(fs_info->super_copy, 0);
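/*
 * super_for_commit is the stable copy handed to write_all_supers()
 * below, so it must be taken now that all root pointers are final.
 */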
  1931. memcpy(fs_info->super_for_commit, fs_info->super_copy,
  1932. sizeof(*fs_info->super_copy));
  1933. btrfs_update_commit_device_size(fs_info);
  1934. btrfs_update_commit_device_bytes_used(fs_info, cur_trans);
  1935. clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
  1936. clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
  1937. btrfs_trans_release_chunk_metadata(trans);
  1938. spin_lock(&fs_info->trans_lock);
  1939. cur_trans->state = TRANS_STATE_UNBLOCKED;
  1940. fs_info->running_transaction = NULL;
  1941. spin_unlock(&fs_info->trans_lock);
  1942. mutex_unlock(&fs_info->reloc_mutex);
  1943. wake_up(&fs_info->transaction_wait);
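/*
 * The transaction is unblocked now: new transactions may start and
 * modify fresh COW copies while we write this one out below.
 */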
  1944. ret = btrfs_write_and_wait_transaction(trans, fs_info);
  1945. if (ret) {
  1946. btrfs_handle_fs_error(fs_info, ret,
  1947. "Error while writing out transaction");
  1948. mutex_unlock(&fs_info->tree_log_mutex);
  1949. goto scrub_continue;
  1950. }
  1951. ret = write_all_supers(fs_info, 0);
  1952. if (ret) {
  1953. mutex_unlock(&fs_info->tree_log_mutex);
  1954. goto scrub_continue;
  1955. }
  1956. /*
  1957. * the super is written, we can safely allow the tree-loggers
  1958. * to go about their business
  1959. */
  1960. mutex_unlock(&fs_info->tree_log_mutex);
  1961. btrfs_finish_extent_commit(trans, fs_info);
  1962. if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
  1963. btrfs_clear_space_info_full(fs_info);
  1964. fs_info->last_trans_committed = cur_trans->transid;
  1965. /*
  1966. * We needn't acquire the lock here because there is no other task
  1967. * which can change it.
  1968. */
  1969. cur_trans->state = TRANS_STATE_COMPLETED;
  1970. wake_up(&cur_trans->commit_wait);
  1971. spin_lock(&fs_info->trans_lock);
  1972. list_del_init(&cur_trans->list);
  1973. spin_unlock(&fs_info->trans_lock);
  1974. btrfs_put_transaction(cur_trans);
  1975. btrfs_put_transaction(cur_trans);
  1976. if (trans->type & __TRANS_FREEZABLE)
  1977. sb_end_intwrite(fs_info->sb);
  1978. trace_btrfs_transaction_commit(trans->root);
  1979. btrfs_scrub_continue(fs_info);
  1980. if (current->journal_info == trans)
  1981. current->journal_info = NULL;
  1982. kmem_cache_free(btrfs_trans_handle_cachep, trans);
1983. /*
1984. * If the fs has been frozen, we cannot handle delayed iputs; otherwise
1985. * we would deadlock on SB_FREEZE_FS.
1986. */
  1987. if (current != fs_info->transaction_kthread &&
  1988. current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
  1989. btrfs_run_delayed_iputs(fs_info);
  1990. return ret;
  1991. scrub_continue:
  1992. btrfs_scrub_continue(fs_info);
  1993. cleanup_transaction:
  1994. btrfs_trans_release_metadata(trans, fs_info);
  1995. btrfs_trans_release_chunk_metadata(trans);
  1996. trans->block_rsv = NULL;
  1997. btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
  1998. if (current->journal_info == trans)
  1999. current->journal_info = NULL;
  2000. cleanup_transaction(trans, trans->root, ret);
  2001. return ret;
  2002. }
  2003. /*
  2004. * return < 0 if error
  2005. * 0 if there are no more dead_roots at the time of call
  2006. * 1 there are more to be processed, call me again
  2007. *
  2008. * The return value indicates there are certainly more snapshots to delete, but
2009. * if a new one arrives during processing, it may still return 0. We don't mind,
2010. * because btrfs_commit_super will poke the cleaner thread and it will process it a
  2011. * few seconds later.
  2012. */
  2013. int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
  2014. {
  2015. int ret;
  2016. struct btrfs_fs_info *fs_info = root->fs_info;
  2017. spin_lock(&fs_info->trans_lock);
  2018. if (list_empty(&fs_info->dead_roots)) {
  2019. spin_unlock(&fs_info->trans_lock);
  2020. return 0;
  2021. }
  2022. root = list_first_entry(&fs_info->dead_roots,
  2023. struct btrfs_root, root_list);
  2024. list_del_init(&root->root_list);
  2025. spin_unlock(&fs_info->trans_lock);
  2026. btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
  2027. btrfs_kill_all_delayed_nodes(root);
  2028. if (btrfs_header_backref_rev(root->node) <
  2029. BTRFS_MIXED_BACKREF_REV)
  2030. ret = btrfs_drop_snapshot(root, NULL, 0, 0);
  2031. else
  2032. ret = btrfs_drop_snapshot(root, NULL, 1, 0);
  2033. return (ret < 0) ? 0 : 1;
  2034. }
  2035. void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
  2036. {
  2037. unsigned long prev;
  2038. unsigned long bit;
  2039. prev = xchg(&fs_info->pending_changes, 0);
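/*
 * Atomically grab and clear all pending change bits, then apply each
 * known bit; anything left over is reported below.
 */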
  2040. if (!prev)
  2041. return;
  2042. bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
  2043. if (prev & bit)
  2044. btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
  2045. prev &= ~bit;
  2046. bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
  2047. if (prev & bit)
  2048. btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
  2049. prev &= ~bit;
  2050. bit = 1 << BTRFS_PENDING_COMMIT;
  2051. if (prev & bit)
  2052. btrfs_debug(fs_info, "pending commit done");
  2053. prev &= ~bit;
  2054. if (prev)
  2055. btrfs_warn(fs_info,
  2056. "unknown pending changes left 0x%lx, ignoring", prev);
  2057. }