transaction.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};
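
/*
 * Editor's note (illustrative, not part of the original sources):
 * join_transaction() below gates new handles on this table with
 *
 *	if (btrfs_blocked_trans_types[cur_trans->state] & type)
 *		return -EBUSY;
 *
 * so each state lists the handle types it refuses.  For example, a plain
 * TRANS_START is turned away once the running transaction reaches
 * TRANS_STATE_BLOCKED, while TRANS_JOIN is still admitted until
 * TRANS_STATE_COMMIT_DOING.
 */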

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		if (transaction->delayed_refs.pending_csums)
			printk(KERN_ERR "pending csums is %llu\n",
			       transaction->delayed_refs.pending_csums);
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static void clear_btree_io_tree(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
		clear_btree_io_tree(&root->dirty_log_pages);
	}

	/* We can free old roots now. */
	spin_lock(&trans->dropped_roots_lock);
	while (!list_empty(&trans->dropped_roots)) {
		root = list_first_entry(&trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&trans->dropped_roots_lock);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&trans->dropped_roots_lock);
	}
	spin_unlock(&trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}
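
/*
 * Editor's note (assumption, the mask itself lives in transaction.h):
 * TRANS_EXTWRITERS covers the handle types opened by callers outside the
 * commit path (start/attach style handles).  Only those handles bump this
 * counter, which lets the commit code use extwriter_counter_read() to wait
 * for external writers to detach without also waiting on the internal
 * JOIN/JOIN_NOLOCK handles that the commit itself holds.
 */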

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->have_free_bgs = 0;
	cur_trans->start_time = get_seconds();
	cur_trans->dirty_bg_run = 0;

	cur_trans->delayed_refs.href_root = RB_ROOT;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.pending_csums = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	cur_trans->delayed_refs.qgroup_to_skip = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->pending_ordered);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	cur_trans->num_dirty_bgs = 0;
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->deleted_bgs_lock);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
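
/*
 * Editor's note: callers are expected to retry on -EBUSY after waiting out
 * the blocked transaction; start_transaction() below does exactly that:
 *
 *	do {
 *		ret = join_transaction(root, type);
 *		if (ret == -EBUSY)
 *			wait_current_trans(root);
 *	} while (ret == -EBUSY);
 *
 * (condensed from the loop in start_transaction(), with the ATTACH special
 * case omitted).
 */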

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}
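
/*
 * Editor's note: the reader side of the IN_TRANS_SETUP dance is in
 * btrfs_record_root_in_trans() below, which pairs an smp_rmb() with the
 * smp_wmb() above:
 *
 *	smp_rmb();
 *	if (root->last_trans == trans->transid &&
 *	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
 *		return 0;
 *
 * A reader may take that lock-free fast path only once both the
 * last_trans update and the cleared setup bit are visible to it.
 */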

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&root->fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&root->fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->nodesize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->chunk_bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	h->reloc_reserved = false;
	h->sync = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);
	INIT_LIST_HEAD(&h->ordered);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}
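
/*
 * Editor's usage sketch (illustrative only): a typical caller reserves
 * space for the number of tree items it will touch, checks for failure,
 * and ends the handle when done:
 *
 *	struct btrfs_trans_handle *trans;
 *
 *	trans = btrfs_start_transaction(root, 2);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify up to two items ...
 *	btrfs_end_transaction(trans, root);
 *
 * btrfs_join_transaction() is the same but with no item reservation; it
 * only attaches to (or creates) the running transaction.
 */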

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you want to be sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke btrfs_attach_transaction_barrier().
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}
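
/*
 * Editor's usage sketch (illustrative only; the actual callers live outside
 * this file): sync-like paths attach to whatever is running instead of
 * opening a new transaction:
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans)) {
 *		if (PTR_ERR(trans) == -ENOENT)
 *			return 0;
 *		return PTR_ERR(trans);
 *	}
 *	return btrfs_commit_transaction(trans, root);
 *
 * Returning 0 on -ENOENT is safe because the barrier variant has already
 * waited for any not-yet-complete previous commit.
 */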

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > root->fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}
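
/*
 * Editor's note: transid == 0 means "wait for the newest transaction that
 * is currently committing, if any", which is what
 * btrfs_attach_transaction_barrier() above relies on:
 *
 *	btrfs_wait_for_commit(root, 0);
 *
 * A nonzero transid waits for that specific transaction, and returns
 * -EINVAL if it has neither committed nor appears in the list.
 */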

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates * 2);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}
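
/*
 * Editor's usage sketch (illustrative only): long-running operations call
 * this periodically and restart their handle when it says to stop:
 *
 *	while (more_work) {
 *		... do a batch of work with trans ...
 *		if (btrfs_should_end_transaction(trans, root)) {
 *			btrfs_end_transaction(trans, root);
 *			trans = btrfs_start_transaction(root, nitems);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 *
 * where nitems is whatever reservation the next batch needs.
 */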

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	unsigned long cur = trans->delayed_ref_updates;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;
	int must_run_delayed_refs = 0;

	if (trans->use_count > 1) {
		trans->use_count--;
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (!list_empty(&trans->ordered)) {
		spin_lock(&info->trans_lock);
		list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
		spin_unlock(&info->trans_lock);
	}

	trans->delayed_ref_updates = 0;
	if (!trans->sync) {
		must_run_delayed_refs =
			btrfs_should_throttle_delayed_refs(trans, root);
		cur = max_t(unsigned long, cur, 32);

		/*
		 * don't make the caller wait if they are from a NOLOCK
		 * or ATTACH transaction, it will deadlock with commit
		 */
		if (must_run_delayed_refs == 1 &&
		    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
			must_run_delayed_refs = 2;
	}

	if (trans->qgroup_reserved) {
		/*
		 * the same root has to be passed here between start_transaction
		 * and end_transaction. Subvolume quota depends on this.
		 */
		btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	btrfs_trans_release_chunk_metadata(trans);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root) &&
	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
		spin_lock(&info->trans_lock);
		if (cur_trans->state == TRANS_STATE_RUNNING)
			cur_trans->state = TRANS_STATE_BLOCKED;
		spin_unlock(&info->trans_lock);
	}

	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	if (must_run_delayed_refs) {
		btrfs_async_run_delayed_refs(root, cur,
					     must_run_delayed_refs == 1);
	}
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}
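
/*
 * Editor's note: the throttle variant differs in two ways visible above:
 * if the transaction has gone TRANS_STATE_BLOCKED it commits it directly
 * instead of just waking the transaction kthread, and it runs the delayed
 * iputs afterwards.  Heavy background work such as snapshot deletion uses
 * the throttled form, e.g.
 *
 *	btrfs_end_transaction_throttle(trans, root);
 *
 * (illustrative; the actual callers live outside this file).
 */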

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state, GFP_NOFS);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error.  So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to btrfs_wait_marked_extents() would not know that writeback
		 * for this range started and therefore wouldn't wait for it to
		 * finish - we don't want to commit a superblock that points to
		 * btree nodes/leaves for which writeback hasn't finished yet
		 * (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through clear_btree_io_tree()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	bool errors = false;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree.  For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through clear_btree_io_tree()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT,
				       0, 0, &cached_state, GFP_NOFS);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		if ((mark & EXTENT_DIRTY) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
				       &btree_ino->runtime_flags))
			errors = true;

		if ((mark & EXTENT_NEW) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	} else {
		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	}

	if (errors && !werr)
		werr = -EIO;

	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
{
	int ret;

	ret = btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
	clear_btree_io_tree(&trans->transaction->dirty_pages);

	return ret;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans, root);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}
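
/*
 * Editor's note: contrast this with btrfs_add_dropped_root() earlier in
 * this file.  Dead roots queue whole snapshots for deletion (assumption:
 * the cleaner thread picks them up, outside this file), while dropped
 * roots are already-deleted roots whose in-memory structures are freed at
 * commit time in switch_commit_roots().
 */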

/*
 * update all the fs tree (subvolume) roots on disk; these are the roots
 * that were tagged in the radix tree when they joined this transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error that may affect the commit of the current transaction
 * happens, we return the error number.  If the error only affects the
 * creation of a pending snapshot, we store it in pending->error and
 * return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroup skip the new snapshot's qgroupid, as it is
	 * accounted by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
				     trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
	       BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We checked the name at the beginning, so these are impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * account qgroup counters before qgroup_inherit()
	 */
	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}
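
/*
 * Copy the bytenr, generation and level of the freshly committed tree
 * root and chunk root into the in-memory superblock, so the upcoming
 * superblock write points at the new trees.
 */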
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}
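
/*
 * Report whether the currently running transaction, if any, has entered
 * the commit phase (TRANS_STATE_COMMIT_START or later).
 */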
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}
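
/*
 * Report whether the currently running transaction, if any, is in a
 * blocked state, meaning new joins would have to wait.
 */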
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
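
/*
 * Error path of a transaction commit: abort the transaction, remove it
 * from the fs_info list, wait until we are the last writer, and release
 * everything the normal commit path would have released.
 */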
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans == root->fs_info->running_transaction)
		root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
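
	/*
	 * Two puts: one for the reference held by this trans handle, one
	 * for the reference that keeps the transaction alive until its
	 * commit (or cleanup) is done (use_count starts at 2 in
	 * join_transaction()).
	 */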
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(root->fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
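
/*
 * With the flushoncommit mount option, the commit must wait for all dirty
 * data to reach disk. These helpers start and wait for that delalloc
 * flush; without the option they are no-ops.
 */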
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		return btrfs_start_delalloc_roots(fs_info, 1, -1);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, -1);
}
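
/*
 * Wait for every ordered extent on this transaction's pending list to
 * complete. The trans_lock is dropped around each wait because we cannot
 * sleep while holding a spinlock.
 */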
static inline void
btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
			   struct btrfs_fs_info *fs_info)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&cur_trans->pending_ordered)) {
		ordered = list_first_entry(&cur_trans->pending_ordered,
					   struct btrfs_ordered_extent,
					   trans_list);
		list_del_init(&ordered->trans_list);
		spin_unlock(&fs_info->trans_lock);

		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
}
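
/*
 * Commit the current transaction: flush remaining delayed refs/items and
 * dirty block groups, create the pending snapshots, commit the fs roots
 * and cow-only roots, write out all dirty tree blocks and the super
 * blocks, then unblock waiters. On error the transaction is aborted and
 * torn down via cleanup_transaction().
 */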
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	int ret;

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	if (!cur_trans->dirty_bg_run) {
		int run_it = 0;

		/* this mutex is also taken before trying to set
		 * block groups readonly.  We need to make sure
		 * that nobody has set a block group readonly
		 * after extents from that block group have been
		 * allocated for cache files.  btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it
		 * finds dirty_bg_run = 1
		 *
		 * The dirty_bg_run flag is also used to make sure only
		 * one process starts all the block group IO.  It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&root->fs_info->ro_block_group_mutex);
		if (!cur_trans->dirty_bg_run) {
			run_it = 1;
			cur_trans->dirty_bg_run = 1;
		}
		mutex_unlock(&root->fs_info->ro_block_group_mutex);

		if (run_it)
			ret = btrfs_start_dirty_block_groups(trans, root);
	}
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		if (unlikely(cur_trans->aborted))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);
			ret = prev_trans->aborted;

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending stuff might be added after the previous flush. */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_wait_pending_ordered(cur_trans, root->fs_info);

	btrfs_scrub_pause(root);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/* Record old roots for later qgroup accounting */
	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_fs_roots() and commit_cowonly_roots() below are
	 * responsible for getting the various roots consistent with each
	 * other.  Every pointer in the tree of tree roots has to point to
	 * the most up-to-date root for every subvolume and other tree.
	 * So, we have to keep the tree logging code from jumping in and
	 * changing any of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(root->fs_info);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	/*
	 * Since the fs roots are all committed, we can get quite accurate
	 * new_roots, so do the quota accounting now.
	 */
	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
	if (ret < 0) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;
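
	/*
	 * Point the tree root and chunk root items at the nodes this
	 * commit COWed, and queue both roots so switch_commit_roots()
	 * makes the new nodes their commit roots.
	 */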
	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	btrfs_update_commit_device_size(root->fs_info);
	btrfs_update_commit_device_bytes_used(root, cur_trans);

	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);

	btrfs_trans_release_chunk_metadata(trans);
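
	/*
	 * The content of this commit is now fixed; mark the transaction
	 * unblocked so new transactions can start while we write it out.
	 */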
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	if (cur_trans->have_free_bgs)
		btrfs_clear_space_info_full(root->fs_info);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);
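
	/*
	 * As in cleanup_transaction(): one put for this handle's
	 * reference, one for the reference that kept the transaction
	 * alive until the commit finished.
	 */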
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread &&
	    current != root->fs_info->cleaner_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 there are more to be processed, call me again
 *
 * A return value of 1 means there are certainly more snapshots to delete;
 * if a new dead root arrives while we are processing, we may still return
 * 0. That is fine: btrfs_commit_super will poke the cleaner thread and it
 * will process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}
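
/*
 * Apply the state switches (currently the inode map cache mount option)
 * that were queued on fs_info->pending_changes while a transaction was
 * running; called once the transaction is done so the switch happens
 * before the next transaction starts.
 */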
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}