transaction.c

  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/fs.h>
  19. #include <linux/slab.h>
  20. #include <linux/sched.h>
  21. #include <linux/writeback.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/blkdev.h>
  24. #include <linux/uuid.h>
  25. #include "ctree.h"
  26. #include "disk-io.h"
  27. #include "transaction.h"
  28. #include "locking.h"
  29. #include "tree-log.h"
  30. #include "inode-map.h"
  31. #include "volumes.h"
  32. #include "dev-replace.h"
  33. #include "qgroup.h"
  34. #define BTRFS_ROOT_TRANS_TAG 0
  35. static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
  36. [TRANS_STATE_RUNNING] = 0U,
  37. [TRANS_STATE_BLOCKED] = (__TRANS_USERSPACE |
  38. __TRANS_START),
  39. [TRANS_STATE_COMMIT_START] = (__TRANS_USERSPACE |
  40. __TRANS_START |
  41. __TRANS_ATTACH),
  42. [TRANS_STATE_COMMIT_DOING] = (__TRANS_USERSPACE |
  43. __TRANS_START |
  44. __TRANS_ATTACH |
  45. __TRANS_JOIN),
  46. [TRANS_STATE_UNBLOCKED] = (__TRANS_USERSPACE |
  47. __TRANS_START |
  48. __TRANS_ATTACH |
  49. __TRANS_JOIN |
  50. __TRANS_JOIN_NOLOCK),
  51. [TRANS_STATE_COMPLETED] = (__TRANS_USERSPACE |
  52. __TRANS_START |
  53. __TRANS_ATTACH |
  54. __TRANS_JOIN |
  55. __TRANS_JOIN_NOLOCK),
  56. };
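/*
 * Editorial note: each entry in the table above lists the handle types that
 * are refused (join_transaction() returns -EBUSY) once the running
 * transaction has reached that state; for example, after
 * TRANS_STATE_COMMIT_DOING only TRANS_JOIN_NOLOCK handles may still join.
 */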
  57. void btrfs_put_transaction(struct btrfs_transaction *transaction)
  58. {
  59. WARN_ON(atomic_read(&transaction->use_count) == 0);
  60. if (atomic_dec_and_test(&transaction->use_count)) {
  61. BUG_ON(!list_empty(&transaction->list));
  62. WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
  63. while (!list_empty(&transaction->pending_chunks)) {
  64. struct extent_map *em;
  65. em = list_first_entry(&transaction->pending_chunks,
  66. struct extent_map, list);
  67. list_del_init(&em->list);
  68. free_extent_map(em);
  69. }
  70. kmem_cache_free(btrfs_transaction_cachep, transaction);
  71. }
  72. }
  73. static noinline void switch_commit_roots(struct btrfs_transaction *trans,
  74. struct btrfs_fs_info *fs_info)
  75. {
  76. struct btrfs_root *root, *tmp;
  77. down_write(&fs_info->commit_root_sem);
  78. list_for_each_entry_safe(root, tmp, &trans->switch_commits,
  79. dirty_list) {
  80. list_del_init(&root->dirty_list);
  81. free_extent_buffer(root->commit_root);
  82. root->commit_root = btrfs_root_node(root);
  83. if (is_fstree(root->objectid))
  84. btrfs_unpin_free_ino(root);
  85. }
  86. up_write(&fs_info->commit_root_sem);
  87. }
  88. static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
  89. unsigned int type)
  90. {
  91. if (type & TRANS_EXTWRITERS)
  92. atomic_inc(&trans->num_extwriters);
  93. }
  94. static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
  95. unsigned int type)
  96. {
  97. if (type & TRANS_EXTWRITERS)
  98. atomic_dec(&trans->num_extwriters);
  99. }
  100. static inline void extwriter_counter_init(struct btrfs_transaction *trans,
  101. unsigned int type)
  102. {
  103. atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
  104. }
  105. static inline int extwriter_counter_read(struct btrfs_transaction *trans)
  106. {
  107. return atomic_read(&trans->num_extwriters);
  108. }
  109. /*
  110. * either allocate a new transaction or hop into the existing one
  111. */
  112. static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
  113. {
  114. struct btrfs_transaction *cur_trans;
  115. struct btrfs_fs_info *fs_info = root->fs_info;
  116. spin_lock(&fs_info->trans_lock);
  117. loop:
  118. /* The file system has been taken offline. No new transactions. */
  119. if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
  120. spin_unlock(&fs_info->trans_lock);
  121. return -EROFS;
  122. }
  123. cur_trans = fs_info->running_transaction;
  124. if (cur_trans) {
  125. if (cur_trans->aborted) {
  126. spin_unlock(&fs_info->trans_lock);
  127. return cur_trans->aborted;
  128. }
  129. if (btrfs_blocked_trans_types[cur_trans->state] & type) {
  130. spin_unlock(&fs_info->trans_lock);
  131. return -EBUSY;
  132. }
  133. atomic_inc(&cur_trans->use_count);
  134. atomic_inc(&cur_trans->num_writers);
  135. extwriter_counter_inc(cur_trans, type);
  136. spin_unlock(&fs_info->trans_lock);
  137. return 0;
  138. }
  139. spin_unlock(&fs_info->trans_lock);
  140. /*
  141. * If we are ATTACH, we just want to catch the current transaction,
  142. * and commit it. If there is no transaction, just return ENOENT.
  143. */
  144. if (type == TRANS_ATTACH)
  145. return -ENOENT;
  146. /*
  147. * JOIN_NOLOCK only happens during the transaction commit, so
  148. * it is impossible that ->running_transaction is NULL
  149. */
  150. BUG_ON(type == TRANS_JOIN_NOLOCK);
  151. cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
  152. if (!cur_trans)
  153. return -ENOMEM;
  154. spin_lock(&fs_info->trans_lock);
  155. if (fs_info->running_transaction) {
  156. /*
  157. * someone started a transaction after we unlocked. Make sure
  158. * to redo the checks above
  159. */
  160. kmem_cache_free(btrfs_transaction_cachep, cur_trans);
  161. goto loop;
  162. } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
  163. spin_unlock(&fs_info->trans_lock);
  164. kmem_cache_free(btrfs_transaction_cachep, cur_trans);
  165. return -EROFS;
  166. }
  167. atomic_set(&cur_trans->num_writers, 1);
  168. extwriter_counter_init(cur_trans, type);
  169. init_waitqueue_head(&cur_trans->writer_wait);
  170. init_waitqueue_head(&cur_trans->commit_wait);
  171. cur_trans->state = TRANS_STATE_RUNNING;
  172. /*
  173. * One for this trans handle, one so it will live on until we
  174. * commit the transaction.
  175. */
  176. atomic_set(&cur_trans->use_count, 2);
  177. cur_trans->start_time = get_seconds();
  178. cur_trans->delayed_refs.href_root = RB_ROOT;
  179. atomic_set(&cur_trans->delayed_refs.num_entries, 0);
  180. cur_trans->delayed_refs.num_heads_ready = 0;
  181. cur_trans->delayed_refs.num_heads = 0;
  182. cur_trans->delayed_refs.flushing = 0;
  183. cur_trans->delayed_refs.run_delayed_start = 0;
  184. /*
  185. * although the tree mod log is per file system and not per transaction,
  186. * the log must never go across transaction boundaries.
  187. */
  188. smp_mb();
  189. if (!list_empty(&fs_info->tree_mod_seq_list))
  190. WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
  191. "creating a fresh transaction\n");
  192. if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
  193. WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
  194. "creating a fresh transaction\n");
  195. atomic64_set(&fs_info->tree_mod_seq, 0);
  196. spin_lock_init(&cur_trans->delayed_refs.lock);
  197. INIT_LIST_HEAD(&cur_trans->pending_snapshots);
  198. INIT_LIST_HEAD(&cur_trans->ordered_operations);
  199. INIT_LIST_HEAD(&cur_trans->pending_chunks);
  200. INIT_LIST_HEAD(&cur_trans->switch_commits);
  201. list_add_tail(&cur_trans->list, &fs_info->trans_list);
  202. extent_io_tree_init(&cur_trans->dirty_pages,
  203. fs_info->btree_inode->i_mapping);
  204. fs_info->generation++;
  205. cur_trans->transid = fs_info->generation;
  206. fs_info->running_transaction = cur_trans;
  207. cur_trans->aborted = 0;
  208. spin_unlock(&fs_info->trans_lock);
  209. return 0;
  210. }
  211. /*
  212. * this does all the record keeping required to make sure that a reference
  213. * counted root is properly recorded in a given transaction. This is required
  214. * to make sure the old root from before we joined the transaction is deleted
  215. * when the transaction commits
  216. */
  217. static int record_root_in_trans(struct btrfs_trans_handle *trans,
  218. struct btrfs_root *root)
  219. {
  220. if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  221. root->last_trans < trans->transid) {
  222. WARN_ON(root == root->fs_info->extent_root);
  223. WARN_ON(root->commit_root != root->node);
  224. /*
  225. * see below for IN_TRANS_SETUP usage rules
  226. * we have the reloc mutex held now, so there
  227. * is only one writer in this function
  228. */
  229. set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
  230. /* make sure readers find IN_TRANS_SETUP before
  231. * they find our root->last_trans update
  232. */
  233. smp_wmb();
  234. spin_lock(&root->fs_info->fs_roots_radix_lock);
  235. if (root->last_trans == trans->transid) {
  236. spin_unlock(&root->fs_info->fs_roots_radix_lock);
  237. return 0;
  238. }
  239. radix_tree_tag_set(&root->fs_info->fs_roots_radix,
  240. (unsigned long)root->root_key.objectid,
  241. BTRFS_ROOT_TRANS_TAG);
  242. spin_unlock(&root->fs_info->fs_roots_radix_lock);
  243. root->last_trans = trans->transid;
  244. /* this is pretty tricky. We don't want to
  245. * take the relocation lock in btrfs_record_root_in_trans
  246. * unless we're really doing the first setup for this root in
  247. * this transaction.
  248. *
  249. * Normally we'd use root->last_trans as a flag to decide
  250. * if we want to take the expensive mutex.
  251. *
  252. * But, we have to set root->last_trans before we
  253. * init the relocation root, otherwise, we trip over warnings
  254. * in ctree.c. The solution used here is to flag ourselves
  255. * with root IN_TRANS_SETUP. When this is 1, we're still
  256. * fixing up the reloc trees and everyone must wait.
  257. *
  258. * When this is zero, they can trust root->last_trans and fly
  259. * through btrfs_record_root_in_trans without having to take the
  260. * lock. smp_wmb() makes sure that all the writes above are
  261. * done before we pop in the zero below
  262. */
  263. btrfs_init_reloc_root(trans, root);
  264. smp_mb__before_atomic();
  265. clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
  266. }
  267. return 0;
  268. }
  269. int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
  270. struct btrfs_root *root)
  271. {
  272. if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
  273. return 0;
  274. /*
  275. * see record_root_in_trans for comments about IN_TRANS_SETUP usage
  276. * and barriers
  277. */
  278. smp_rmb();
  279. if (root->last_trans == trans->transid &&
  280. !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
  281. return 0;
  282. mutex_lock(&root->fs_info->reloc_mutex);
  283. record_root_in_trans(trans, root);
  284. mutex_unlock(&root->fs_info->reloc_mutex);
  285. return 0;
  286. }
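/*
 * Editorial recap (not part of the original source): the IN_TRANS_SETUP bit
 * is what lets btrfs_record_root_in_trans() above skip the expensive
 * reloc_mutex on its fast path. Once a reader sees root->last_trans equal to
 * the running transid with the bit clear, record_root_in_trans() has already
 * finished initializing the reloc root, so there is nothing left to do.
 */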
  287. static inline int is_transaction_blocked(struct btrfs_transaction *trans)
  288. {
  289. return (trans->state >= TRANS_STATE_BLOCKED &&
  290. trans->state < TRANS_STATE_UNBLOCKED &&
  291. !trans->aborted);
  292. }
  293. /* wait for commit against the current transaction to become unblocked.
  294. * When this is done, it is safe to start a new transaction, but the current
  295. * transaction might not be fully on disk.
  296. */
  297. static void wait_current_trans(struct btrfs_root *root)
  298. {
  299. struct btrfs_transaction *cur_trans;
  300. spin_lock(&root->fs_info->trans_lock);
  301. cur_trans = root->fs_info->running_transaction;
  302. if (cur_trans && is_transaction_blocked(cur_trans)) {
  303. atomic_inc(&cur_trans->use_count);
  304. spin_unlock(&root->fs_info->trans_lock);
  305. wait_event(root->fs_info->transaction_wait,
  306. cur_trans->state >= TRANS_STATE_UNBLOCKED ||
  307. cur_trans->aborted);
  308. btrfs_put_transaction(cur_trans);
  309. } else {
  310. spin_unlock(&root->fs_info->trans_lock);
  311. }
  312. }
  313. static int may_wait_transaction(struct btrfs_root *root, int type)
  314. {
  315. if (root->fs_info->log_root_recovering)
  316. return 0;
  317. if (type == TRANS_USERSPACE)
  318. return 1;
  319. if (type == TRANS_START &&
  320. !atomic_read(&root->fs_info->open_ioctl_trans))
  321. return 1;
  322. return 0;
  323. }
  324. static inline bool need_reserve_reloc_root(struct btrfs_root *root)
  325. {
  326. if (!root->fs_info->reloc_ctl ||
  327. !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
  328. root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
  329. root->reloc_root)
  330. return false;
  331. return true;
  332. }
  333. static struct btrfs_trans_handle *
  334. start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
  335. enum btrfs_reserve_flush_enum flush)
  336. {
  337. struct btrfs_trans_handle *h;
  338. struct btrfs_transaction *cur_trans;
  339. u64 num_bytes = 0;
  340. u64 qgroup_reserved = 0;
  341. bool reloc_reserved = false;
  342. int ret;
  343. if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
  344. return ERR_PTR(-EROFS);
  345. if (current->journal_info &&
  346. current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
  347. WARN_ON(type & TRANS_EXTWRITERS);
  348. h = current->journal_info;
  349. h->use_count++;
  350. WARN_ON(h->use_count > 2);
  351. h->orig_rsv = h->block_rsv;
  352. h->block_rsv = NULL;
  353. goto got_it;
  354. }
  355. /*
  356. * Do the reservation before we join the transaction so we can do all
  357. * the appropriate flushing if need be.
  358. */
  359. if (num_items > 0 && root != root->fs_info->chunk_root) {
  360. if (root->fs_info->quota_enabled &&
  361. is_fstree(root->root_key.objectid)) {
  362. qgroup_reserved = num_items * root->leafsize;
  363. ret = btrfs_qgroup_reserve(root, qgroup_reserved);
  364. if (ret)
  365. return ERR_PTR(ret);
  366. }
  367. num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
  368. /*
  369. * Do the reservation for the relocation root creation
  370. */
  371. if (unlikely(need_reserve_reloc_root(root))) {
  372. num_bytes += root->nodesize;
  373. reloc_reserved = true;
  374. }
  375. ret = btrfs_block_rsv_add(root,
  376. &root->fs_info->trans_block_rsv,
  377. num_bytes, flush);
  378. if (ret)
  379. goto reserve_fail;
  380. }
  381. again:
  382. h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
  383. if (!h) {
  384. ret = -ENOMEM;
  385. goto alloc_fail;
  386. }
  387. /*
  388. * If we are JOIN_NOLOCK we're already committing a transaction and
  389. * waiting on this guy, so we don't need to do the sb_start_intwrite
  390. * because we're already holding a ref. We need this because we could
  391. * have raced in and done an fsync() on a file which can kick a commit
  392. * and then we deadlock with somebody doing a freeze.
  393. *
  394. * If we are ATTACH, it means we just want to catch the current
  395. * transaction and commit it, so we needn't do sb_start_intwrite().
  396. */
  397. if (type & __TRANS_FREEZABLE)
  398. sb_start_intwrite(root->fs_info->sb);
  399. if (may_wait_transaction(root, type))
  400. wait_current_trans(root);
  401. do {
  402. ret = join_transaction(root, type);
  403. if (ret == -EBUSY) {
  404. wait_current_trans(root);
  405. if (unlikely(type == TRANS_ATTACH))
  406. ret = -ENOENT;
  407. }
  408. } while (ret == -EBUSY);
  409. if (ret < 0) {
  410. /* We must get the transaction if we are JOIN_NOLOCK. */
  411. BUG_ON(type == TRANS_JOIN_NOLOCK);
  412. goto join_fail;
  413. }
  414. cur_trans = root->fs_info->running_transaction;
  415. h->transid = cur_trans->transid;
  416. h->transaction = cur_trans;
  417. h->blocks_used = 0;
  418. h->bytes_reserved = 0;
  419. h->root = root;
  420. h->delayed_ref_updates = 0;
  421. h->use_count = 1;
  422. h->adding_csums = 0;
  423. h->block_rsv = NULL;
  424. h->orig_rsv = NULL;
  425. h->aborted = 0;
  426. h->qgroup_reserved = 0;
  427. h->delayed_ref_elem.seq = 0;
  428. h->type = type;
  429. h->allocating_chunk = false;
  430. h->reloc_reserved = false;
  431. h->sync = false;
  432. INIT_LIST_HEAD(&h->qgroup_ref_list);
  433. INIT_LIST_HEAD(&h->new_bgs);
  434. smp_mb();
  435. if (cur_trans->state >= TRANS_STATE_BLOCKED &&
  436. may_wait_transaction(root, type)) {
  437. btrfs_commit_transaction(h, root);
  438. goto again;
  439. }
  440. if (num_bytes) {
  441. trace_btrfs_space_reservation(root->fs_info, "transaction",
  442. h->transid, num_bytes, 1);
  443. h->block_rsv = &root->fs_info->trans_block_rsv;
  444. h->bytes_reserved = num_bytes;
  445. h->reloc_reserved = reloc_reserved;
  446. }
  447. h->qgroup_reserved = qgroup_reserved;
  448. got_it:
  449. btrfs_record_root_in_trans(h, root);
  450. if (!current->journal_info && type != TRANS_USERSPACE)
  451. current->journal_info = h;
  452. return h;
  453. join_fail:
  454. if (type & __TRANS_FREEZABLE)
  455. sb_end_intwrite(root->fs_info->sb);
  456. kmem_cache_free(btrfs_trans_handle_cachep, h);
  457. alloc_fail:
  458. if (num_bytes)
  459. btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
  460. num_bytes);
  461. reserve_fail:
  462. if (qgroup_reserved)
  463. btrfs_qgroup_free(root, qgroup_reserved);
  464. return ERR_PTR(ret);
  465. }
  466. struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
  467. int num_items)
  468. {
  469. return start_transaction(root, num_items, TRANS_START,
  470. BTRFS_RESERVE_FLUSH_ALL);
  471. }
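/*
 * Usage sketch (editorial; the helper named below is hypothetical and shown
 * only for illustration): a typical caller reserves space for the items it
 * expects to touch and pairs btrfs_start_transaction() with
 * btrfs_end_transaction(), which is defined later in this file:
 *
 *	trans = btrfs_start_transaction(root, 2);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = update_some_items(trans, root);	(hypothetical helper)
 *	btrfs_end_transaction(trans, root);
 *	return ret;
 */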
  472. struct btrfs_trans_handle *btrfs_start_transaction_lflush(
  473. struct btrfs_root *root, int num_items)
  474. {
  475. return start_transaction(root, num_items, TRANS_START,
  476. BTRFS_RESERVE_FLUSH_LIMIT);
  477. }
  478. struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
  479. {
  480. return start_transaction(root, 0, TRANS_JOIN, 0);
  481. }
  482. struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
  483. {
  484. return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
  485. }
  486. struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
  487. {
  488. return start_transaction(root, 0, TRANS_USERSPACE, 0);
  489. }
  490. /*
  491. * btrfs_attach_transaction() - catch the running transaction
  492. *
  493. * It is used when we want to commit the current transaction, but
  494. * don't want to start a new one.
  495. *
  496. * Note: If this function returns -ENOENT, it just means there is no
  497. * running transaction. But it is possible that an inactive transaction
  498. * is still in memory, not fully on disk. If you need to be sure that no
  499. * inactive transaction remains in the fs when -ENOENT is returned, you
  500. * should invoke
  501. * btrfs_attach_transaction_barrier() instead.
  502. */
  503. struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
  504. {
  505. return start_transaction(root, 0, TRANS_ATTACH, 0);
  506. }
  507. /*
  508. * btrfs_attach_transaction_barrier() - catch the running transaction
  509. *
  510. * It is similar to the above function; the difference is that this one
  511. * will wait for all the inactive transactions until they fully
  512. * complete.
  513. */
  514. struct btrfs_trans_handle *
  515. btrfs_attach_transaction_barrier(struct btrfs_root *root)
  516. {
  517. struct btrfs_trans_handle *trans;
  518. trans = start_transaction(root, 0, TRANS_ATTACH, 0);
  519. if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
  520. btrfs_wait_for_commit(root, 0);
  521. return trans;
  522. }
  523. /* wait for a transaction commit to be fully complete */
  524. static noinline void wait_for_commit(struct btrfs_root *root,
  525. struct btrfs_transaction *commit)
  526. {
  527. wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
  528. }
  529. int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
  530. {
  531. struct btrfs_transaction *cur_trans = NULL, *t;
  532. int ret = 0;
  533. if (transid) {
  534. if (transid <= root->fs_info->last_trans_committed)
  535. goto out;
  536. ret = -EINVAL;
  537. /* find specified transaction */
  538. spin_lock(&root->fs_info->trans_lock);
  539. list_for_each_entry(t, &root->fs_info->trans_list, list) {
  540. if (t->transid == transid) {
  541. cur_trans = t;
  542. atomic_inc(&cur_trans->use_count);
  543. ret = 0;
  544. break;
  545. }
  546. if (t->transid > transid) {
  547. ret = 0;
  548. break;
  549. }
  550. }
  551. spin_unlock(&root->fs_info->trans_lock);
  552. /* The specified transaction doesn't exist */
  553. if (!cur_trans)
  554. goto out;
  555. } else {
  556. /* find newest transaction that is committing | committed */
  557. spin_lock(&root->fs_info->trans_lock);
  558. list_for_each_entry_reverse(t, &root->fs_info->trans_list,
  559. list) {
  560. if (t->state >= TRANS_STATE_COMMIT_START) {
  561. if (t->state == TRANS_STATE_COMPLETED)
  562. break;
  563. cur_trans = t;
  564. atomic_inc(&cur_trans->use_count);
  565. break;
  566. }
  567. }
  568. spin_unlock(&root->fs_info->trans_lock);
  569. if (!cur_trans)
  570. goto out; /* nothing committing|committed */
  571. }
  572. wait_for_commit(root, cur_trans);
  573. btrfs_put_transaction(cur_trans);
  574. out:
  575. return ret;
  576. }
  577. void btrfs_throttle(struct btrfs_root *root)
  578. {
  579. if (!atomic_read(&root->fs_info->open_ioctl_trans))
  580. wait_current_trans(root);
  581. }
  582. static int should_end_transaction(struct btrfs_trans_handle *trans,
  583. struct btrfs_root *root)
  584. {
  585. if (root->fs_info->global_block_rsv.space_info->full &&
  586. btrfs_check_space_for_delayed_refs(trans, root))
  587. return 1;
  588. return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
  589. }
  590. int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
  591. struct btrfs_root *root)
  592. {
  593. struct btrfs_transaction *cur_trans = trans->transaction;
  594. int updates;
  595. int err;
  596. smp_mb();
  597. if (cur_trans->state >= TRANS_STATE_BLOCKED ||
  598. cur_trans->delayed_refs.flushing)
  599. return 1;
  600. updates = trans->delayed_ref_updates;
  601. trans->delayed_ref_updates = 0;
  602. if (updates) {
  603. err = btrfs_run_delayed_refs(trans, root, updates);
  604. if (err) /* Error code will also eval true */
  605. return err;
  606. }
  607. return should_end_transaction(trans, root);
  608. }
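/*
 * Usage sketch (editorial, not from the original file): long-running loops
 * that hold a transaction open can poll the helper above and restart the
 * transaction when it asks them to, roughly:
 *
 *	if (btrfs_should_end_transaction(trans, root)) {
 *		btrfs_end_transaction(trans, root);
 *		trans = btrfs_start_transaction(root, 1);
 *		if (IS_ERR(trans))
 *			return PTR_ERR(trans);
 *	}
 */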
  609. static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
  610. struct btrfs_root *root, int throttle)
  611. {
  612. struct btrfs_transaction *cur_trans = trans->transaction;
  613. struct btrfs_fs_info *info = root->fs_info;
  614. unsigned long cur = trans->delayed_ref_updates;
  615. int lock = (trans->type != TRANS_JOIN_NOLOCK);
  616. int err = 0;
  617. int must_run_delayed_refs = 0;
  618. if (trans->use_count > 1) {
  619. trans->use_count--;
  620. trans->block_rsv = trans->orig_rsv;
  621. return 0;
  622. }
  623. btrfs_trans_release_metadata(trans, root);
  624. trans->block_rsv = NULL;
  625. if (!list_empty(&trans->new_bgs))
  626. btrfs_create_pending_block_groups(trans, root);
  627. trans->delayed_ref_updates = 0;
  628. if (!trans->sync) {
  629. must_run_delayed_refs =
  630. btrfs_should_throttle_delayed_refs(trans, root);
  631. cur = max_t(unsigned long, cur, 32);
  632. /*
  633. * don't make the caller wait if they are from a NOLOCK
  634. * or ATTACH transaction, it will deadlock with commit
  635. */
  636. if (must_run_delayed_refs == 1 &&
  637. (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
  638. must_run_delayed_refs = 2;
  639. }
  640. if (trans->qgroup_reserved) {
  641. /*
  642. * the same root has to be passed here between start_transaction
  643. * and end_transaction. Subvolume quota depends on this.
  644. */
  645. btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
  646. trans->qgroup_reserved = 0;
  647. }
  648. btrfs_trans_release_metadata(trans, root);
  649. trans->block_rsv = NULL;
  650. if (!list_empty(&trans->new_bgs))
  651. btrfs_create_pending_block_groups(trans, root);
  652. if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
  653. should_end_transaction(trans, root) &&
  654. ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
  655. spin_lock(&info->trans_lock);
  656. if (cur_trans->state == TRANS_STATE_RUNNING)
  657. cur_trans->state = TRANS_STATE_BLOCKED;
  658. spin_unlock(&info->trans_lock);
  659. }
  660. if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
  661. if (throttle)
  662. return btrfs_commit_transaction(trans, root);
  663. else
  664. wake_up_process(info->transaction_kthread);
  665. }
  666. if (trans->type & __TRANS_FREEZABLE)
  667. sb_end_intwrite(root->fs_info->sb);
  668. WARN_ON(cur_trans != info->running_transaction);
  669. WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
  670. atomic_dec(&cur_trans->num_writers);
  671. extwriter_counter_dec(cur_trans, trans->type);
  672. smp_mb();
  673. if (waitqueue_active(&cur_trans->writer_wait))
  674. wake_up(&cur_trans->writer_wait);
  675. btrfs_put_transaction(cur_trans);
  676. if (current->journal_info == trans)
  677. current->journal_info = NULL;
  678. if (throttle)
  679. btrfs_run_delayed_iputs(root);
  680. if (trans->aborted ||
  681. test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
  682. wake_up_process(info->transaction_kthread);
  683. err = -EIO;
  684. }
  685. assert_qgroups_uptodate(trans);
  686. kmem_cache_free(btrfs_trans_handle_cachep, trans);
  687. if (must_run_delayed_refs) {
  688. btrfs_async_run_delayed_refs(root, cur,
  689. must_run_delayed_refs == 1);
  690. }
  691. return err;
  692. }
  693. int btrfs_end_transaction(struct btrfs_trans_handle *trans,
  694. struct btrfs_root *root)
  695. {
  696. return __btrfs_end_transaction(trans, root, 0);
  697. }
  698. int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
  699. struct btrfs_root *root)
  700. {
  701. return __btrfs_end_transaction(trans, root, 1);
  702. }
  703. /*
  704. * when btree blocks are allocated, they have some corresponding bits set for
  705. * them in one of two extent_io trees. This is used to make sure all of
  706. * those extents are sent to disk but does not wait on them
  707. */
  708. int btrfs_write_marked_extents(struct btrfs_root *root,
  709. struct extent_io_tree *dirty_pages, int mark)
  710. {
  711. int err = 0;
  712. int werr = 0;
  713. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  714. struct extent_state *cached_state = NULL;
  715. u64 start = 0;
  716. u64 end;
  717. while (!find_first_extent_bit(dirty_pages, start, &start, &end,
  718. mark, &cached_state)) {
  719. convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
  720. mark, &cached_state, GFP_NOFS);
  721. cached_state = NULL;
  722. err = filemap_fdatawrite_range(mapping, start, end);
  723. if (err)
  724. werr = err;
  725. cond_resched();
  726. start = end + 1;
  727. }
  728. if (err)
  729. werr = err;
  730. return werr;
  731. }
  732. /*
  733. * when btree blocks are allocated, they have some corresponding bits set for
  734. * them in one of two extent_io trees. This is used to make sure all of
  735. * those extents are on disk for transaction or log commit. We wait
  736. * on all the pages and clear them from the dirty pages state tree
  737. */
  738. int btrfs_wait_marked_extents(struct btrfs_root *root,
  739. struct extent_io_tree *dirty_pages, int mark)
  740. {
  741. int err = 0;
  742. int werr = 0;
  743. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  744. struct extent_state *cached_state = NULL;
  745. u64 start = 0;
  746. u64 end;
  747. while (!find_first_extent_bit(dirty_pages, start, &start, &end,
  748. EXTENT_NEED_WAIT, &cached_state)) {
  749. clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
  750. 0, 0, &cached_state, GFP_NOFS);
  751. err = filemap_fdatawait_range(mapping, start, end);
  752. if (err)
  753. werr = err;
  754. cond_resched();
  755. start = end + 1;
  756. }
  757. if (err)
  758. werr = err;
  759. return werr;
  760. }
  761. /*
  762. * when btree blocks are allocated, they have some corresponding bits set for
  763. * them in one of two extent_io trees. This is used to make sure all of
  764. * those extents are on disk for transaction or log commit
  765. */
  766. static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
  767. struct extent_io_tree *dirty_pages, int mark)
  768. {
  769. int ret;
  770. int ret2;
  771. struct blk_plug plug;
  772. blk_start_plug(&plug);
  773. ret = btrfs_write_marked_extents(root, dirty_pages, mark);
  774. blk_finish_plug(&plug);
  775. ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
  776. if (ret)
  777. return ret;
  778. if (ret2)
  779. return ret2;
  780. return 0;
  781. }
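/*
 * Editorial note: btrfs_write_marked_extents() converts the @mark bit to
 * EXTENT_NEED_WAIT as it issues writeback, and btrfs_wait_marked_extents()
 * then finds and clears EXTENT_NEED_WAIT while it waits. Splitting the two
 * phases lets the blk_plug above batch all the writes before any waiting
 * starts.
 */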
  782. int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
  783. struct btrfs_root *root)
  784. {
  785. if (!trans || !trans->transaction) {
  786. struct inode *btree_inode;
  787. btree_inode = root->fs_info->btree_inode;
  788. return filemap_write_and_wait(btree_inode->i_mapping);
  789. }
  790. return btrfs_write_and_wait_marked_extents(root,
  791. &trans->transaction->dirty_pages,
  792. EXTENT_DIRTY);
  793. }
  794. /*
  795. * this is used to update the root pointer in the tree of tree roots.
  796. *
  797. * But, in the case of the extent allocation tree, updating the root
  798. * pointer may allocate blocks which may change the root of the extent
  799. * allocation tree.
  800. *
  801. * So, this loops and repeats and makes sure the cowonly root didn't
  802. * change while the root pointer was being updated in the metadata.
  803. */
  804. static int update_cowonly_root(struct btrfs_trans_handle *trans,
  805. struct btrfs_root *root)
  806. {
  807. int ret;
  808. u64 old_root_bytenr;
  809. u64 old_root_used;
  810. struct btrfs_root *tree_root = root->fs_info->tree_root;
  811. old_root_used = btrfs_root_used(&root->root_item);
  812. btrfs_write_dirty_block_groups(trans, root);
  813. while (1) {
  814. old_root_bytenr = btrfs_root_bytenr(&root->root_item);
  815. if (old_root_bytenr == root->node->start &&
  816. old_root_used == btrfs_root_used(&root->root_item))
  817. break;
  818. btrfs_set_root_node(&root->root_item, root->node);
  819. ret = btrfs_update_root(trans, tree_root,
  820. &root->root_key,
  821. &root->root_item);
  822. if (ret)
  823. return ret;
  824. old_root_used = btrfs_root_used(&root->root_item);
  825. ret = btrfs_write_dirty_block_groups(trans, root);
  826. if (ret)
  827. return ret;
  828. }
  829. return 0;
  830. }
  831. /*
  832. * update all the cowonly tree roots on disk
  833. *
  834. * The error handling in this function may not be obvious. Any of the
  835. * failures will cause the file system to go offline. We still need
  836. * to clean up the delayed refs.
  837. */
  838. static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  839. struct btrfs_root *root)
  840. {
  841. struct btrfs_fs_info *fs_info = root->fs_info;
  842. struct list_head *next;
  843. struct extent_buffer *eb;
  844. int ret;
  845. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  846. if (ret)
  847. return ret;
  848. eb = btrfs_lock_root_node(fs_info->tree_root);
  849. ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
  850. 0, &eb);
  851. btrfs_tree_unlock(eb);
  852. free_extent_buffer(eb);
  853. if (ret)
  854. return ret;
  855. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  856. if (ret)
  857. return ret;
  858. ret = btrfs_run_dev_stats(trans, root->fs_info);
  859. if (ret)
  860. return ret;
  861. ret = btrfs_run_dev_replace(trans, root->fs_info);
  862. if (ret)
  863. return ret;
  864. ret = btrfs_run_qgroups(trans, root->fs_info);
  865. if (ret)
  866. return ret;
  867. /* run_qgroups might have added some more refs */
  868. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  869. if (ret)
  870. return ret;
  871. while (!list_empty(&fs_info->dirty_cowonly_roots)) {
  872. next = fs_info->dirty_cowonly_roots.next;
  873. list_del_init(next);
  874. root = list_entry(next, struct btrfs_root, dirty_list);
  875. if (root != fs_info->extent_root)
  876. list_add_tail(&root->dirty_list,
  877. &trans->transaction->switch_commits);
  878. ret = update_cowonly_root(trans, root);
  879. if (ret)
  880. return ret;
  881. }
  882. list_add_tail(&fs_info->extent_root->dirty_list,
  883. &trans->transaction->switch_commits);
  884. btrfs_after_dev_replace_commit(fs_info);
  885. return 0;
  886. }
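/*
 * Editorial note: the loop above keeps draining dirty_cowonly_roots because
 * update_cowonly_root() can itself dirty more tree blocks (block group and
 * extent tree updates), which may put additional cowonly roots back on the
 * list before it empties.
 */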
  887. /*
  888. * dead roots are old snapshots that need to be deleted. This allocates
  889. * a dirty root struct and adds it into the list of dead roots that need to
  890. * be deleted
  891. */
  892. void btrfs_add_dead_root(struct btrfs_root *root)
  893. {
  894. spin_lock(&root->fs_info->trans_lock);
  895. if (list_empty(&root->root_list))
  896. list_add_tail(&root->root_list, &root->fs_info->dead_roots);
  897. spin_unlock(&root->fs_info->trans_lock);
  898. }
  899. /*
  900. * commit all the fs/subvolume tree roots that were modified in this transaction
  901. */
  902. static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
  903. struct btrfs_root *root)
  904. {
  905. struct btrfs_root *gang[8];
  906. struct btrfs_fs_info *fs_info = root->fs_info;
  907. int i;
  908. int ret;
  909. int err = 0;
  910. spin_lock(&fs_info->fs_roots_radix_lock);
  911. while (1) {
  912. ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
  913. (void **)gang, 0,
  914. ARRAY_SIZE(gang),
  915. BTRFS_ROOT_TRANS_TAG);
  916. if (ret == 0)
  917. break;
  918. for (i = 0; i < ret; i++) {
  919. root = gang[i];
  920. radix_tree_tag_clear(&fs_info->fs_roots_radix,
  921. (unsigned long)root->root_key.objectid,
  922. BTRFS_ROOT_TRANS_TAG);
  923. spin_unlock(&fs_info->fs_roots_radix_lock);
  924. btrfs_free_log(trans, root);
  925. btrfs_update_reloc_root(trans, root);
  926. btrfs_orphan_commit_root(trans, root);
  927. btrfs_save_ino_cache(root, trans);
  928. /* see comments in should_cow_block() */
  929. clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
  930. smp_mb__after_atomic();
  931. if (root->commit_root != root->node) {
  932. list_add_tail(&root->dirty_list,
  933. &trans->transaction->switch_commits);
  934. btrfs_set_root_node(&root->root_item,
  935. root->node);
  936. }
  937. err = btrfs_update_root(trans, fs_info->tree_root,
  938. &root->root_key,
  939. &root->root_item);
  940. spin_lock(&fs_info->fs_roots_radix_lock);
  941. if (err)
  942. break;
  943. }
  944. }
  945. spin_unlock(&fs_info->fs_roots_radix_lock);
  946. return err;
  947. }
  948. /*
  949. * defrag a given btree.
  950. * Every leaf in the btree is read and defragged.
  951. */
  952. int btrfs_defrag_root(struct btrfs_root *root)
  953. {
  954. struct btrfs_fs_info *info = root->fs_info;
  955. struct btrfs_trans_handle *trans;
  956. int ret;
  957. if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
  958. return 0;
  959. while (1) {
  960. trans = btrfs_start_transaction(root, 0);
  961. if (IS_ERR(trans))
  962. return PTR_ERR(trans);
  963. ret = btrfs_defrag_leaves(trans, root);
  964. btrfs_end_transaction(trans, root);
  965. btrfs_btree_balance_dirty(info->tree_root);
  966. cond_resched();
  967. if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
  968. break;
  969. if (btrfs_defrag_cancelled(root->fs_info)) {
  970. pr_debug("BTRFS: defrag_root cancelled\n");
  971. ret = -EAGAIN;
  972. break;
  973. }
  974. }
  975. clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
  976. return ret;
  977. }
  978. /*
  979. * new snapshots need to be created at a very specific time in the
  980. * transaction commit. This does the actual creation.
  981. *
  982. * Note:
  983. * If an error occurs that may affect the commit of the current transaction,
  984. * return the error number. If an error only affects the creation of this
  985. * pending snapshot, store it in pending->error and return 0.
  986. */
  987. static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
  988. struct btrfs_fs_info *fs_info,
  989. struct btrfs_pending_snapshot *pending)
  990. {
  991. struct btrfs_key key;
  992. struct btrfs_root_item *new_root_item;
  993. struct btrfs_root *tree_root = fs_info->tree_root;
  994. struct btrfs_root *root = pending->root;
  995. struct btrfs_root *parent_root;
  996. struct btrfs_block_rsv *rsv;
  997. struct inode *parent_inode;
  998. struct btrfs_path *path;
  999. struct btrfs_dir_item *dir_item;
  1000. struct dentry *dentry;
  1001. struct extent_buffer *tmp;
  1002. struct extent_buffer *old;
  1003. struct timespec cur_time = CURRENT_TIME;
  1004. int ret = 0;
  1005. u64 to_reserve = 0;
  1006. u64 index = 0;
  1007. u64 objectid;
  1008. u64 root_flags;
  1009. uuid_le new_uuid;
  1010. path = btrfs_alloc_path();
  1011. if (!path) {
  1012. pending->error = -ENOMEM;
  1013. return 0;
  1014. }
  1015. new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
  1016. if (!new_root_item) {
  1017. pending->error = -ENOMEM;
  1018. goto root_item_alloc_fail;
  1019. }
  1020. pending->error = btrfs_find_free_objectid(tree_root, &objectid);
  1021. if (pending->error)
  1022. goto no_free_objectid;
  1023. btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
  1024. if (to_reserve > 0) {
  1025. pending->error = btrfs_block_rsv_add(root,
  1026. &pending->block_rsv,
  1027. to_reserve,
  1028. BTRFS_RESERVE_NO_FLUSH);
  1029. if (pending->error)
  1030. goto no_free_objectid;
  1031. }
  1032. key.objectid = objectid;
  1033. key.offset = (u64)-1;
  1034. key.type = BTRFS_ROOT_ITEM_KEY;
  1035. rsv = trans->block_rsv;
  1036. trans->block_rsv = &pending->block_rsv;
  1037. trans->bytes_reserved = trans->block_rsv->reserved;
  1038. dentry = pending->dentry;
  1039. parent_inode = pending->dir;
  1040. parent_root = BTRFS_I(parent_inode)->root;
  1041. record_root_in_trans(trans, parent_root);
  1042. /*
  1043. * insert the directory item
  1044. */
  1045. ret = btrfs_set_inode_index(parent_inode, &index);
  1046. BUG_ON(ret); /* -ENOMEM */
  1047. /* check if there is a file/dir which has the same name. */
  1048. dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
  1049. btrfs_ino(parent_inode),
  1050. dentry->d_name.name,
  1051. dentry->d_name.len, 0);
  1052. if (dir_item != NULL && !IS_ERR(dir_item)) {
  1053. pending->error = -EEXIST;
  1054. goto dir_item_existed;
  1055. } else if (IS_ERR(dir_item)) {
  1056. ret = PTR_ERR(dir_item);
  1057. btrfs_abort_transaction(trans, root, ret);
  1058. goto fail;
  1059. }
  1060. btrfs_release_path(path);
  1061. /*
  1062. * pull in the delayed directory update
  1063. * and the delayed inode item
  1064. * otherwise we corrupt the FS during
  1065. * snapshot
  1066. */
  1067. ret = btrfs_run_delayed_items(trans, root);
  1068. if (ret) { /* Transaction aborted */
  1069. btrfs_abort_transaction(trans, root, ret);
  1070. goto fail;
  1071. }
  1072. record_root_in_trans(trans, root);
  1073. btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
  1074. memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
  1075. btrfs_check_and_init_root_item(new_root_item);
  1076. root_flags = btrfs_root_flags(new_root_item);
  1077. if (pending->readonly)
  1078. root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
  1079. else
  1080. root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
  1081. btrfs_set_root_flags(new_root_item, root_flags);
  1082. btrfs_set_root_generation_v2(new_root_item,
  1083. trans->transid);
  1084. uuid_le_gen(&new_uuid);
  1085. memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
  1086. memcpy(new_root_item->parent_uuid, root->root_item.uuid,
  1087. BTRFS_UUID_SIZE);
  1088. if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
  1089. memset(new_root_item->received_uuid, 0,
  1090. sizeof(new_root_item->received_uuid));
  1091. memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
  1092. memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
  1093. btrfs_set_root_stransid(new_root_item, 0);
  1094. btrfs_set_root_rtransid(new_root_item, 0);
  1095. }
  1096. btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
  1097. btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
  1098. btrfs_set_root_otransid(new_root_item, trans->transid);
  1099. old = btrfs_lock_root_node(root);
  1100. ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
  1101. if (ret) {
  1102. btrfs_tree_unlock(old);
  1103. free_extent_buffer(old);
  1104. btrfs_abort_transaction(trans, root, ret);
  1105. goto fail;
  1106. }
  1107. btrfs_set_lock_blocking(old);
  1108. ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
  1109. /* clean up in any case */
  1110. btrfs_tree_unlock(old);
  1111. free_extent_buffer(old);
  1112. if (ret) {
  1113. btrfs_abort_transaction(trans, root, ret);
  1114. goto fail;
  1115. }
  1116. /*
  1117. * We need to flush delayed refs in order to make sure all of our quota
  1118. * operations have been done before we call btrfs_qgroup_inherit.
  1119. */
  1120. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  1121. if (ret) {
  1122. btrfs_abort_transaction(trans, root, ret);
  1123. goto fail;
  1124. }
  1125. ret = btrfs_qgroup_inherit(trans, fs_info,
  1126. root->root_key.objectid,
  1127. objectid, pending->inherit);
  1128. if (ret) {
  1129. btrfs_abort_transaction(trans, root, ret);
  1130. goto fail;
  1131. }
  1132. /* see comments in should_cow_block() */
  1133. set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
  1134. smp_wmb();
  1135. btrfs_set_root_node(new_root_item, tmp);
  1136. /* record when the snapshot was created in key.offset */
  1137. key.offset = trans->transid;
  1138. ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
  1139. btrfs_tree_unlock(tmp);
  1140. free_extent_buffer(tmp);
  1141. if (ret) {
  1142. btrfs_abort_transaction(trans, root, ret);
  1143. goto fail;
  1144. }
  1145. /*
  1146. * insert root back/forward references
  1147. */
  1148. ret = btrfs_add_root_ref(trans, tree_root, objectid,
  1149. parent_root->root_key.objectid,
  1150. btrfs_ino(parent_inode), index,
  1151. dentry->d_name.name, dentry->d_name.len);
  1152. if (ret) {
  1153. btrfs_abort_transaction(trans, root, ret);
  1154. goto fail;
  1155. }
  1156. key.offset = (u64)-1;
  1157. pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
  1158. if (IS_ERR(pending->snap)) {
  1159. ret = PTR_ERR(pending->snap);
  1160. btrfs_abort_transaction(trans, root, ret);
  1161. goto fail;
  1162. }
  1163. ret = btrfs_reloc_post_snapshot(trans, pending);
  1164. if (ret) {
  1165. btrfs_abort_transaction(trans, root, ret);
  1166. goto fail;
  1167. }
  1168. ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
  1169. if (ret) {
  1170. btrfs_abort_transaction(trans, root, ret);
  1171. goto fail;
  1172. }
  1173. ret = btrfs_insert_dir_item(trans, parent_root,
  1174. dentry->d_name.name, dentry->d_name.len,
  1175. parent_inode, &key,
  1176. BTRFS_FT_DIR, index);
  1177. /* We already checked the name at the beginning, so these errors are impossible here. */
  1178. BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
  1179. if (ret) {
  1180. btrfs_abort_transaction(trans, root, ret);
  1181. goto fail;
  1182. }
  1183. btrfs_i_size_write(parent_inode, parent_inode->i_size +
  1184. dentry->d_name.len * 2);
  1185. parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
  1186. ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
  1187. if (ret) {
  1188. btrfs_abort_transaction(trans, root, ret);
  1189. goto fail;
  1190. }
  1191. ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
  1192. BTRFS_UUID_KEY_SUBVOL, objectid);
  1193. if (ret) {
  1194. btrfs_abort_transaction(trans, root, ret);
  1195. goto fail;
  1196. }
  1197. if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
  1198. ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
  1199. new_root_item->received_uuid,
  1200. BTRFS_UUID_KEY_RECEIVED_SUBVOL,
  1201. objectid);
  1202. if (ret && ret != -EEXIST) {
  1203. btrfs_abort_transaction(trans, root, ret);
  1204. goto fail;
  1205. }
  1206. }
  1207. fail:
  1208. pending->error = ret;
  1209. dir_item_existed:
  1210. trans->block_rsv = rsv;
  1211. trans->bytes_reserved = 0;
  1212. no_free_objectid:
  1213. kfree(new_root_item);
  1214. root_item_alloc_fail:
  1215. btrfs_free_path(path);
  1216. return ret;
  1217. }
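/*
 * Editorial note: errors that only affect this one snapshot are reported
 * through pending->error while the function still returns 0 on those paths;
 * errors that poison the whole transaction call btrfs_abort_transaction()
 * and are also returned in ret.
 */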
  1218. /*
  1219. * create all the snapshots we've scheduled for creation
  1220. */
  1221. static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
  1222. struct btrfs_fs_info *fs_info)
  1223. {
  1224. struct btrfs_pending_snapshot *pending, *next;
  1225. struct list_head *head = &trans->transaction->pending_snapshots;
  1226. int ret = 0;
  1227. list_for_each_entry_safe(pending, next, head, list) {
  1228. list_del(&pending->list);
  1229. ret = create_pending_snapshot(trans, fs_info, pending);
  1230. if (ret)
  1231. break;
  1232. }
  1233. return ret;
  1234. }
  1235. static void update_super_roots(struct btrfs_root *root)
  1236. {
  1237. struct btrfs_root_item *root_item;
  1238. struct btrfs_super_block *super;
  1239. super = root->fs_info->super_copy;
  1240. root_item = &root->fs_info->chunk_root->root_item;
  1241. super->chunk_root = root_item->bytenr;
  1242. super->chunk_root_generation = root_item->generation;
  1243. super->chunk_root_level = root_item->level;
  1244. root_item = &root->fs_info->tree_root->root_item;
  1245. super->root = root_item->bytenr;
  1246. super->generation = root_item->generation;
  1247. super->root_level = root_item->level;
  1248. if (btrfs_test_opt(root, SPACE_CACHE))
  1249. super->cache_generation = root_item->generation;
  1250. if (root->fs_info->update_uuid_tree_gen)
  1251. super->uuid_tree_generation = root_item->generation;
  1252. }
  1253. int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
  1254. {
  1255. struct btrfs_transaction *trans;
  1256. int ret = 0;
  1257. spin_lock(&info->trans_lock);
  1258. trans = info->running_transaction;
  1259. if (trans)
  1260. ret = (trans->state >= TRANS_STATE_COMMIT_START);
  1261. spin_unlock(&info->trans_lock);
  1262. return ret;
  1263. }
  1264. int btrfs_transaction_blocked(struct btrfs_fs_info *info)
  1265. {
  1266. struct btrfs_transaction *trans;
  1267. int ret = 0;
  1268. spin_lock(&info->trans_lock);
  1269. trans = info->running_transaction;
  1270. if (trans)
  1271. ret = is_transaction_blocked(trans);
  1272. spin_unlock(&info->trans_lock);
  1273. return ret;
  1274. }
  1275. /*
  1276. * wait for the current transaction commit to start and block subsequent
  1277. * transaction joins
  1278. */
  1279. static void wait_current_trans_commit_start(struct btrfs_root *root,
  1280. struct btrfs_transaction *trans)
  1281. {
  1282. wait_event(root->fs_info->transaction_blocked_wait,
  1283. trans->state >= TRANS_STATE_COMMIT_START ||
  1284. trans->aborted);
  1285. }
  1286. /*
  1287. * wait for the current transaction to start and then become unblocked.
  1288. * caller holds ref.
  1289. */
  1290. static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
  1291. struct btrfs_transaction *trans)
  1292. {
  1293. wait_event(root->fs_info->transaction_wait,
  1294. trans->state >= TRANS_STATE_UNBLOCKED ||
  1295. trans->aborted);
  1296. }
  1297. /*
  1298. * commit transactions asynchronously. once btrfs_commit_transaction_async
  1299. * returns, any subsequent transaction will not be allowed to join.
  1300. */
  1301. struct btrfs_async_commit {
  1302. struct btrfs_trans_handle *newtrans;
  1303. struct btrfs_root *root;
  1304. struct work_struct work;
  1305. };
  1306. static void do_async_commit(struct work_struct *work)
  1307. {
  1308. struct btrfs_async_commit *ac =
  1309. container_of(work, struct btrfs_async_commit, work);
  1310. /*
  1311. * We've got freeze protection passed with the transaction.
  1312. * Tell lockdep about it.
  1313. */
  1314. if (ac->newtrans->type & __TRANS_FREEZABLE)
  1315. rwsem_acquire_read(
  1316. &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
  1317. 0, 1, _THIS_IP_);
  1318. current->journal_info = ac->newtrans;
  1319. btrfs_commit_transaction(ac->newtrans, ac->root);
  1320. kfree(ac);
  1321. }

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
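
/*
 * Tear down a transaction after a fatal error during commit: mark it
 * aborted, wait for the remaining writers to finish, drop it from
 * fs_info->trans_list and release the handle and transaction references.
 */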
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans == root->fs_info->running_transaction)
		root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(root->fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
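
/*
 * Flush work that is still queued on this transaction: delayed inode items
 * and the remaining ordered operations.
 */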
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_run_delayed_items(trans, root);
	/*
	 * running the delayed items may have added new refs. account
	 * them now so that they hinder processing of more delayed refs
	 * as little as possible.
	 */
	if (ret)
		return ret;

	/*
	 * rename doesn't use btrfs_join_transaction, so once we
	 * set the transaction to blocked above, we aren't going
	 * to get any new ordered operations.  We can safely run
	 * it here and know for sure that nothing new will be added
	 * to the list
	 */
	ret = btrfs_run_ordered_operations(trans, root, 1);

	return ret;
}
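
/*
 * With the "flushoncommit" mount option, start and wait for writeback of
 * all delalloc data before the transaction commits.
 */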
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		return btrfs_start_delalloc_roots(fs_info, 1, -1);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, -1);
}
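
/*
 * Commit the current transaction: flush pending work, wait for the other
 * writers to finish, write out all the tree roots and the superblock, and
 * release the transaction.  On error the transaction is aborted and cleaned
 * up via cleanup_transaction().
 */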
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;

	ret = btrfs_run_ordered_operations(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			btrfs_put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
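
	/*
	 * This handle no longer counts as an external writer, so the waits
	 * on extwriter_counter_read() below can make progress while we flush
	 * delalloc and the remaining pending work.
	 */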
	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending work might be added after the previous flush. */
	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_scrub_pause(root);

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we should set the inode map cache
	 * flag before any other coming transaction.
	 */
	if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
		btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
	else
		btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);
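
	/*
	 * Write out all the dirty tree blocks of this transaction and wait
	 * for the IO to finish before the superblock is updated below.
	 */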
	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
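
	/*
	 * Error paths: let scrub resume, then release the reservations and
	 * tear down the aborted transaction.
	 */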
scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 if there are more to be processed, call me again
 *
 * A return value of 1 indicates there are certainly more snapshots to delete,
 * but if a new one comes in during processing, it may return 0. We don't mind,
 * because btrfs_commit_super will poke the cleaner thread and it will process
 * it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
			struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
	/*
	 * If we encounter a transaction abort during snapshot cleaning, we
	 * don't want to crash here
	 */
	return (ret < 0) ? 0 : 1;
}