transaction.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		if (transaction->delayed_refs.pending_csums)
			printk(KERN_ERR "pending csums is %llu\n",
			       transaction->delayed_refs.pending_csums);
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group_cache *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group_cache,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_put_block_group_trimming(cache);
			btrfs_put_block_group(cache);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
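
/*
 * Empty the extent_io tree used for btree pages: remove and free every
 * extent_state left in it.  Called once a transaction or log commit is done
 * with the tree, so nobody should be waiting on these states anymore.
 */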
static void clear_btree_io_tree(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once clear_btree_io_tree is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}
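
/*
 * At commit time, make each root's commit_root point at the root node that
 * was just written, drop the reference to the old commit root, and free any
 * roots that were dropped during this transaction.
 */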
static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
		clear_btree_io_tree(&root->dirty_log_pages);
	}

	/* We can free old roots now. */
	spin_lock(&trans->dropped_roots_lock);
	while (!list_empty(&trans->dropped_roots)) {
		root = list_first_entry(&trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&trans->dropped_roots_lock);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&trans->dropped_roots_lock);
	}
	spin_unlock(&trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}
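
/*
 * Track how many transaction handles of the TRANS_EXTWRITERS types are
 * attached to a transaction; the commit path uses this counter to know when
 * all external writers have detached.
 */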
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked. Make sure
		 * to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	init_waitqueue_head(&cur_trans->pending_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	atomic_set(&cur_trans->pending_ordered, 0);
	cur_trans->flags = 0;
	cur_trans->start_time = get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	cur_trans->num_dirty_bgs = 0;
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&root->fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&root->fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}
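
/*
 * Decide whether the caller should wait for a blocked transaction before
 * joining.  Never wait during log replay; TRANS_USERSPACE always waits,
 * and TRANS_START only waits when no ioctl-started transaction is open.
 */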
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		qgroup_reserved = num_items * root->nodesize;
		ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved);
		if (ret)
			return ERR_PTR(ret);

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	h->use_count = 1;
	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	btrfs_qgroup_free_meta(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor)
{
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	int ret;

	trans = btrfs_start_transaction(root, num_items);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return trans;

	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
	ret = btrfs_cond_migrate_bytes(root->fs_info,
				       &root->fs_info->trans_block_rsv,
				       num_bytes,
				       min_factor);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ERR_PTR(ret);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	trans->bytes_reserved = num_bytes;

	return trans;
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN,
				 BTRFS_RESERVE_NO_FLUSH);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE,
				 BTRFS_RESERVE_NO_FLUSH);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}
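
/*
 * Wait for the transaction with the given transid to finish committing, or,
 * if transid is 0, for the newest transaction that is already committing.
 * Returns -EINVAL if the requested transid is beyond any transaction that
 * has been started.
 */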
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > root->fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}
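
/*
 * Return nonzero when the transaction should be ended soon: either the
 * metadata space is full and the delayed refs need more reservation, or the
 * global block reserve is running low.
 */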
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates * 2);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}
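
/*
 * Common helper to end a transaction handle: release metadata reservations,
 * create any pending block groups, possibly mark the transaction blocked or
 * commit it (when throttling), and drop this handle's writer counts.
 */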
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	unsigned long cur = trans->delayed_ref_updates;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;
	int must_run_delayed_refs = 0;

	if (trans->use_count > 1) {
		trans->use_count--;
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	trans->delayed_ref_updates = 0;
	if (!trans->sync) {
		must_run_delayed_refs =
			btrfs_should_throttle_delayed_refs(trans, root);
		cur = max_t(unsigned long, cur, 32);

		/*
		 * don't make the caller wait if they are from a NOLOCK
		 * or ATTACH transaction, it will deadlock with commit
		 */
		if (must_run_delayed_refs == 1 &&
		    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
			must_run_delayed_refs = 2;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	btrfs_trans_release_chunk_metadata(trans);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root) &&
	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
		spin_lock(&info->trans_lock);
		if (cur_trans->state == TRANS_STATE_RUNNING)
			cur_trans->state = TRANS_STATE_BLOCKED;
		spin_unlock(&info->trans_lock);
	}

	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	if (must_run_delayed_refs) {
		btrfs_async_run_delayed_refs(root, cur,
					     must_run_delayed_refs == 1);
	}
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state, GFP_NOFS);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error.  So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to btrfs_wait_marked_extents() would not know that writeback
		 * for this range started and therefore wouldn't wait for it to
		 * finish - we don't want to commit a superblock that points to
		 * btree nodes/leaves for which writeback hasn't finished yet
		 * (and without errors).
		 * We clean up any entries left in the io tree when committing
		 * the transaction (through clear_btree_io_tree()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	bool errors = false;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through clear_btree_io_tree()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT,
				       0, 0, &cached_state, GFP_NOFS);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		if ((mark & EXTENT_DIRTY) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
				       &btree_ino->runtime_flags))
			errors = true;

		if ((mark & EXTENT_NEW) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	} else {
		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	}

	if (errors && !werr)
		werr = -EIO;

	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}
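
/*
 * Write out and wait on every btree block dirtied by this transaction, then
 * empty the transaction's dirty_pages io tree.
 */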
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
{
	int ret;

	ret = btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
	clear_btree_io_tree(&trans->transaction->dirty_pages);

	return ret;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans, root);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * Update each fs tree root that was modified in this transaction: write its
 * root item into the tree of tree roots and do the per-root commit work
 * (log freeing, reloc root update, orphan and inode cache handling).
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
			btrfs_qgroup_free_meta_all(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that could affect the commit of the current transaction,
 * return the error number.  If the error only affects the creation of the
 * pending snapshots, return 0.
 */
  1152. static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
  1153. struct btrfs_fs_info *fs_info,
  1154. struct btrfs_pending_snapshot *pending)
  1155. {
  1156. struct btrfs_key key;
  1157. struct btrfs_root_item *new_root_item;
  1158. struct btrfs_root *tree_root = fs_info->tree_root;
  1159. struct btrfs_root *root = pending->root;
  1160. struct btrfs_root *parent_root;
  1161. struct btrfs_block_rsv *rsv;
  1162. struct inode *parent_inode;
  1163. struct btrfs_path *path;
  1164. struct btrfs_dir_item *dir_item;
  1165. struct dentry *dentry;
  1166. struct extent_buffer *tmp;
  1167. struct extent_buffer *old;
  1168. struct timespec cur_time = CURRENT_TIME;
  1169. int ret = 0;
  1170. u64 to_reserve = 0;
  1171. u64 index = 0;
  1172. u64 objectid;
  1173. u64 root_flags;
  1174. uuid_le new_uuid;
  1175. ASSERT(pending->path);
  1176. path = pending->path;
  1177. ASSERT(pending->root_item);
  1178. new_root_item = pending->root_item;
  1179. pending->error = btrfs_find_free_objectid(tree_root, &objectid);
  1180. if (pending->error)
  1181. goto no_free_objectid;
  1182. /*
  1183. * Make qgroup to skip current new snapshot's qgroupid, as it is
  1184. * accounted by later btrfs_qgroup_inherit().
  1185. */
  1186. btrfs_set_skip_qgroup(trans, objectid);
  1187. btrfs_reloc_pre_snapshot(pending, &to_reserve);
  1188. if (to_reserve > 0) {
  1189. pending->error = btrfs_block_rsv_add(root,
  1190. &pending->block_rsv,
  1191. to_reserve,
  1192. BTRFS_RESERVE_NO_FLUSH);
  1193. if (pending->error)
  1194. goto clear_skip_qgroup;
  1195. }
  1196. key.objectid = objectid;
  1197. key.offset = (u64)-1;
  1198. key.type = BTRFS_ROOT_ITEM_KEY;
  1199. rsv = trans->block_rsv;
  1200. trans->block_rsv = &pending->block_rsv;
  1201. trans->bytes_reserved = trans->block_rsv->reserved;
  1202. dentry = pending->dentry;
  1203. parent_inode = pending->dir;
  1204. parent_root = BTRFS_I(parent_inode)->root;
  1205. record_root_in_trans(trans, parent_root);
	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
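
	/*
	 * Look up the in-memory root for the snapshot we just inserted.
	 * An offset of (u64)-1 is the usual wildcard when reading a fs
	 * root by objectid, so this finds the freshly created root item.
	 */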
	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have already checked the name at the beginning, so this is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * account qgroup counters before qgroup_inherit()
	 */
	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}
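
/*
 * Copy the current bytenr, generation and level of the chunk root and the
 * tree root from their in-memory root items into the superblock copy that
 * will be written out at the end of the commit.
 */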
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}
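
/*
 * Report whether the currently running transaction has entered the commit
 * path (state >= TRANS_STATE_COMMIT_START).
 */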
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}
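
/*
 * Report whether the currently running transaction is in a blocked state,
 * as determined by is_transaction_blocked().
 */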
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};
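
/*
 * Work item handler for async commits: tell lockdep we now hold the freeze
 * protection that was handed over with the transaction, then run the actual
 * commit from the worker thread.
 */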
static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
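
/*
 * Error path for a failed commit: abort the transaction, remove it from the
 * fs_info transaction list, wait out any remaining writers if it is still the
 * running transaction, and hand the rest of the teardown to
 * btrfs_cleanup_one_transaction().
 */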
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans == root->fs_info->running_transaction)
		root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(root->fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
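
/*
 * Helpers for the flushoncommit mount option: start delalloc writeback on
 * all roots before the commit, and wait for the resulting ordered extents
 * to finish.
 */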
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		return btrfs_start_delalloc_roots(fs_info, 1, -1);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, -1);
}
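
/*
 * Wait until every ordered extent that this transaction is still tracking
 * as pending has completed.
 */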
static inline void
btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
{
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);
}
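
/*
 * Commit the current transaction: run delayed refs and delayed items, wait
 * out the remaining writers, create any pending snapshots, commit the fs
 * roots and the cow-only roots, write and wait on the dirty tree blocks and
 * finally write the super blocks. On error the transaction is aborted and
 * cleaned up before returning.
 */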
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	int ret;

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/* this mutex is also taken before trying to set
		 * block groups readonly. We need to make sure
		 * that nobody has set a block group readonly
		 * after extents from that block group have been
		 * allocated for cache files. btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it
		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&root->fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&root->fs_info->ro_block_group_mutex);

		if (run_it)
			ret = btrfs_start_dirty_block_groups(trans, root);
	}
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		if (unlikely(cur_trans->aborted))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);
			ret = prev_trans->aborted;

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending stuff might be added after the previous flush. */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_wait_pending_ordered(cur_trans);

	btrfs_scrub_pause(root);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/* Record old roots for later qgroup accounting */
	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_fs_roots() is responsible for getting the
	 * various roots consistent with each other. Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in. By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(root->fs_info);

	/* commit_fs_roots() gets rid of all the tree log roots, so it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	/*
	 * Since fs roots are all committed, we can get a quite accurate
	 * new_roots. So let's do quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
	if (ret < 0) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	btrfs_update_commit_device_size(root->fs_info);
	btrfs_update_commit_device_bytes_used(root, cur_trans);

	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_std_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(root->fs_info);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread &&
	    current != root->fs_info->cleaner_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 there are more to be processed, call me again
 *
 * A return value of 1 means there are certainly more snapshots to delete, but
 * if a new one is added while we are processing, we may still return 0. We
 * don't mind, because btrfs_commit_super will poke the cleaner thread and it
 * will process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
			struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}
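
/*
 * Apply mount option changes that were queued up in fs_info->pending_changes
 * so that they take effect at a transaction boundary.
 */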
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}