transaction.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

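/*
 * For each transaction state, the mask of handle types (__TRANS_*) that are
 * no longer allowed to join the transaction once it has reached that state.
 */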
static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};

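/*
 * Drop a reference on a transaction.  When the last reference goes away the
 * transaction must already be off fs_info->trans_list; free any chunk
 * mappings that are still pending and then release the structure itself.
 */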
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
	}
	up_write(&fs_info->commit_root_sem);
}

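/*
 * The extwriter counter tracks handles of the TRANS_EXTWRITERS types
 * (start/attach/userspace), i.e. the ones that can add new externally
 * visible data to the transaction.  The commit path waits for this counter
 * to drain before it moves forward.
 */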
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.href_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

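/*
 * A transaction counts as blocked once it has reached at least
 * TRANS_STATE_BLOCKED but is not yet unblocked, and it has not been aborted.
 */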
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

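/*
 * Return true when a reloc root will have to be created for this root in the
 * transaction: relocation is running, the root is a reference-counted (COW)
 * tree other than the reloc tree itself, and it has no reloc root yet.
 */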
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

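/*
 * Common helper behind the btrfs_*_transaction() variants below: reserve
 * metadata space for num_items tree operations (plus qgroup and, if needed,
 * reloc root reservations), then join or create a transaction of the given
 * type and return the handle.
 */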
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (unlikely(need_reserve_reloc_root(root))) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	h->reloc_reserved = false;
	h->sync = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that an inactive transaction
 * is still in memory, not fully on disk.  If you need to be sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the function above; the difference is that this one
 * will wait for all inactive transactions to fully complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		ret = -EINVAL;
		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		/* The specified transaction doesn't exist */
		if (!cur_trans)
			goto out;
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

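/*
 * Heuristics used by btrfs_should_end_transaction(): ask the caller to wrap
 * up its handle when the space available for delayed refs or the global
 * block reserve is getting tight.
 */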
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

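/*
 * Common tail for ending a transaction handle: release the metadata and
 * qgroup reservations, create any pending block groups, possibly mark the
 * transaction blocked or commit it when throttling, drop the writer counts
 * and free the handle.
 */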
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	unsigned long cur = trans->delayed_ref_updates;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;
	int must_run_delayed_refs = 0;

	if (trans->use_count > 1) {
		trans->use_count--;
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	trans->delayed_ref_updates = 0;
	if (!trans->sync) {
		must_run_delayed_refs =
			btrfs_should_throttle_delayed_refs(trans, root);
		cur = max_t(unsigned long, cur, 32);

		/*
		 * don't make the caller wait if they are from a NOLOCK
		 * or ATTACH transaction, it will deadlock with commit
		 */
		if (must_run_delayed_refs == 1 &&
		    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
			must_run_delayed_refs = 2;
	}

	if (trans->qgroup_reserved) {
		/*
		 * the same root has to be passed here between start_transaction
		 * and end_transaction. Subvolume quota depends on this.
		 */
		btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root) &&
	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
		spin_lock(&info->trans_lock);
		if (cur_trans->state == TRANS_STATE_RUNNING)
			cur_trans->state = TRANS_STATE_BLOCKED;
		spin_unlock(&info->trans_lock);
	}

	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	if (must_run_delayed_refs) {
		btrfs_async_run_delayed_refs(root, cur,
					     must_run_delayed_refs == 1);
	}
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * update all the fs-tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that could affect the commit of the current
 * transaction, return that error.  If the error only affects the creation
 * of this pending snapshot, store it in pending->error and return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * We need to flush delayed refs in order to make sure all of our quota
	 * operations have been done before we call btrfs_qgroup_inherit.
	 */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}
fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}

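/*
 * Copy the bytenr, generation and level of the freshly committed chunk root
 * and tree root into the in-memory copy of the super block, so the next
 * super block write points at the new trees.
 */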
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

  1307. static void do_async_commit(struct work_struct *work)
  1308. {
  1309. struct btrfs_async_commit *ac =
  1310. container_of(work, struct btrfs_async_commit, work);
  1311. /*
  1312. * We've got freeze protection passed with the transaction.
  1313. * Tell lockdep about it.
  1314. */
  1315. if (ac->newtrans->type & __TRANS_FREEZABLE)
  1316. rwsem_acquire_read(
  1317. &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
  1318. 0, 1, _THIS_IP_);
  1319. current->journal_info = ac->newtrans;
  1320. btrfs_commit_transaction(ac->newtrans, ac->root);
  1321. kfree(ac);
  1322. }
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);

        /*
         * Tell lockdep we've released the freeze rwsem, since the
         * async commit thread will be the one to unlock it.
         */
        if (ac->newtrans->type & __TRANS_FREEZABLE)
                rwsem_release(
                        &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                        1, _THIS_IP_);

        schedule_work(&ac->work);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        btrfs_put_transaction(cur_trans);
        return 0;
}
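
/*
 * Error path of the commit: abort the transaction handle, pull the
 * transaction off the global list, have btrfs_cleanup_one_transaction()
 * tear down its state and drop the remaining references.
 */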
static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        DEFINE_WAIT(wait);

        WARN_ON(trans->use_count > 1);

        btrfs_abort_transaction(trans, root, err);

        spin_lock(&root->fs_info->trans_lock);

        /*
         * If the transaction is removed from the list, it means this
         * transaction has been committed successfully, so it is impossible
         * to call the cleanup function.
         */
        BUG_ON(list_empty(&cur_trans->list));

        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
                cur_trans->state = TRANS_STATE_COMMIT_DOING;
                spin_unlock(&root->fs_info->trans_lock);
                wait_event(cur_trans->writer_wait,
                           atomic_read(&cur_trans->num_writers) == 1);

                spin_lock(&root->fs_info->trans_lock);
        }
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_cleanup_one_transaction(trans->transaction, root);

        spin_lock(&root->fs_info->trans_lock);
        if (cur_trans == root->fs_info->running_transaction)
                root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->trans_lock);

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(root->fs_info->sb);
        btrfs_put_transaction(cur_trans);
        btrfs_put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;
        btrfs_scrub_cancel(root->fs_info);

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
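
/*
 * With the flushoncommit mount option, each commit writes back all dirty
 * (delalloc) data and later waits for the resulting ordered extents;
 * without it these two helpers are no-ops.
 */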
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
        if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
                return btrfs_start_delalloc_roots(fs_info, 1, -1);
        return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
        if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
                btrfs_wait_ordered_roots(fs_info, -1);
}
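
/*
 * Commit the given transaction: flush the remaining delayed work, block
 * out new writers, write all dirty tree blocks and finally the super
 * block.  On failure the transaction is torn down via
 * cleanup_transaction().
 */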
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_transaction *prev_trans = NULL;
        int ret;

        /* Stop the commit early if ->aborted is set */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                btrfs_end_transaction(trans, root);
                return ret;
        }

        /* make a pass through all the delayed refs we have so far
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret) {
                btrfs_end_transaction(trans, root);
                return ret;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        if (trans->qgroup_reserved) {
                btrfs_qgroup_free(root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }

        cur_trans = trans->transaction;

        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;
        smp_wmb();

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret) {
                btrfs_end_transaction(trans, root);
                return ret;
        }
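
        /*
         * If another task already started committing this transaction,
         * just take a reference, end our handle and wait for that commit
         * to finish.
         */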
        spin_lock(&root->fs_info->trans_lock);
        if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
                spin_unlock(&root->fs_info->trans_lock);
                atomic_inc(&cur_trans->use_count);
                ret = btrfs_end_transaction(trans, root);

                wait_for_commit(root, cur_trans);

                btrfs_put_transaction(cur_trans);

                return ret;
        }

        cur_trans->state = TRANS_STATE_COMMIT_START;
        wake_up(&root->fs_info->transaction_blocked_wait);

        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (prev_trans->state != TRANS_STATE_COMPLETED) {
                        atomic_inc(&prev_trans->use_count);
                        spin_unlock(&root->fs_info->trans_lock);

                        wait_for_commit(root, prev_trans);

                        btrfs_put_transaction(prev_trans);
                } else {
                        spin_unlock(&root->fs_info->trans_lock);
                }
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
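
        /*
         * Drop our external writer count, flush delalloc (when requested)
         * and the delayed items, then wait until all remaining external
         * writers have detached from this transaction.
         */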
        extwriter_counter_dec(cur_trans, trans->type);

        ret = btrfs_start_delalloc_flush(root->fs_info);
        if (ret)
                goto cleanup_transaction;

        ret = btrfs_run_delayed_items(trans, root);
        if (ret)
                goto cleanup_transaction;

        wait_event(cur_trans->writer_wait,
                   extwriter_counter_read(cur_trans) == 0);

        /* some pending stuff might have been added after the previous flush. */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret)
                goto cleanup_transaction;

        btrfs_wait_delalloc_flush(root->fs_info);

        btrfs_scrub_pause(root);

        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
         * COMMIT_DOING so make sure to wait for num_writers to drop to 1 again.
         */
        spin_lock(&root->fs_info->trans_lock);
        cur_trans->state = TRANS_STATE_COMMIT_DOING;
        spin_unlock(&root->fs_info->trans_lock);
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);

        /* ->aborted might be set after the previous check, so check it */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                goto scrub_continue;
        }

        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
         * extents around in the middle of the commit
         */
        mutex_lock(&root->fs_info->reloc_mutex);

        /*
         * We needn't worry about the delayed items because we will
         * deal with them in create_pending_snapshot(), which is the
         * core function of the snapshot creation.
         */
        ret = create_pending_snapshots(trans, root->fs_info);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto scrub_continue;
        }

        /*
         * We insert the dir indexes of the snapshots and update the inode
         * of the snapshots' parents after the snapshot creation, so there
         * are some delayed items which are not dealt with.  Now deal with
         * them.
         *
         * We needn't worry that this operation will corrupt the snapshots,
         * because all the trees which are snapshotted will be forced to COW
         * the nodes and leaves.
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto scrub_continue;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto scrub_continue;
        }

        /*
         * make sure none of the code above managed to slip in a
         * delayed item
         */
        btrfs_assert_delayed_root_empty(root);

        WARN_ON(cur_trans != trans->transaction);

        /* commit_fs_roots() is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto scrub_continue;
        }

        /*
         * Since the transaction is done, apply the pending inode map cache
         * setting before any transaction that follows.
         */
        if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
                btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
        else
                btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);

        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto scrub_continue;
        }

        /*
         * The tasks which save the space cache and inode cache may also
         * update ->aborted, check it.
         */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto scrub_continue;
        }

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;
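
        /*
         * Record the new tree root and chunk root nodes in their root
         * items and queue both roots for the commit-time root switch.
         */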
        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        list_add_tail(&root->fs_info->tree_root->dirty_list,
                      &cur_trans->switch_commits);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        list_add_tail(&root->fs_info->chunk_root->dirty_list,
                      &cur_trans->switch_commits);

        switch_commit_roots(cur_trans, root->fs_info);

        assert_qgroups_uptodate(trans);
        update_super_roots(root);

        btrfs_set_super_log_root(root->fs_info->super_copy, 0);
        btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
        memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
               sizeof(*root->fs_info->super_copy));
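
        /*
         * The new roots are captured in super_for_commit; let new
         * transactions join again while we write this one out.
         */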
        spin_lock(&root->fs_info->trans_lock);
        cur_trans->state = TRANS_STATE_UNBLOCKED;
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->reloc_mutex);

        wake_up(&root->fs_info->transaction_wait);

        ret = btrfs_write_and_wait_transaction(trans, root);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Error while writing out transaction");
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto scrub_continue;
        }

        ret = write_ctree_super(trans, root, 0);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto scrub_continue;
        }

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        root->fs_info->last_trans_committed = cur_trans->transid;

        /*
         * We needn't acquire the lock here because there is no other task
         * which can change it.
         */
        cur_trans->state = TRANS_STATE_COMPLETED;
        wake_up(&cur_trans->commit_wait);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_put_transaction(cur_trans);
        btrfs_put_transaction(cur_trans);

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(root->fs_info->sb);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;

scrub_continue:
        btrfs_scrub_continue(root);
cleanup_transaction:
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        if (trans->qgroup_reserved) {
                btrfs_qgroup_free(root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }
        btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
        if (current->journal_info == trans)
                current->journal_info = NULL;
        cleanup_transaction(trans, root, ret);

        return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 there are more to be processed, call me again
 *
 * The return value indicates there are certainly more snapshots to delete, but
 * if a new one arrives while we are processing, we may still return 0.  We
 * don't mind, because btrfs_commit_super will poke the cleaner thread and it
 * will process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        if (list_empty(&fs_info->dead_roots)) {
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        root = list_first_entry(&fs_info->dead_roots,
                                struct btrfs_root, root_list);
        list_del_init(&root->root_list);
        spin_unlock(&fs_info->trans_lock);

        pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

        btrfs_kill_all_delayed_nodes(root);

        if (btrfs_header_backref_rev(root->node) <
                        BTRFS_MIXED_BACKREF_REV)
                ret = btrfs_drop_snapshot(root, NULL, 0, 0);
        else
                ret = btrfs_drop_snapshot(root, NULL, 1, 0);

        /*
         * If we encounter a transaction abort during snapshot cleaning, we
         * don't want to crash here
         */
        return (ret < 0) ? 0 : 1;
}