/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;
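
/*
 * Helper: returns one byte past the end of @entry in the file, clamped
 * to (u64)-1 if file_offset + len would overflow.
 */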
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/*
 * Returns NULL if the insertion worked, or the existing node that
 * overlaps the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
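
/*
 * Both helpers above treat an entry as the half-open byte range
 * [file_offset, file_offset + len).  A worked example with illustrative
 * values (not taken from this file): for an entry with file_offset 4096
 * and len 4096, offset_in_entry() is true for offsets 4096..8191 and
 * false at 8192, while range_overlaps() with file_offset 8192 and len
 * 4096 reports no overlap because the two ranges only touch.
 */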

/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
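
/*
 * Hypothetical caller sketch (illustrative, not code from this file):
 * a writeback path that has just reserved a disk extent would record it
 * with something like
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start,
 *				       num_bytes, num_bytes, type);
 *	if (ret)
 *		return ret;
 *
 * where disk_start and num_bytes describe the reserved extent and type
 * is one of the BTRFS_ORDERED_* bits (or 0 for a plain COW write,
 * judging by the type check in __btrfs_add_ordered_extent()).
 */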

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
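
/*
 * Sketch of the completion path these two helpers imply (hypothetical
 * caller code, variable names are illustrative): when a write bio for
 * [start, start + len) finishes, an endio handler would do roughly
 *
 *	struct btrfs_ordered_extent *cached = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &cached, start, len,
 *					   uptodate)) {
 *		// the whole ordered extent is done: insert csums and
 *		// file extent items, then tear it down
 *		btrfs_remove_ordered_extent(inode, cached);
 *		btrfs_put_ordered_extent(cached); // ref from dec_test
 *		btrfs_put_ordered_extent(cached); // ref held by the tree
 *	}
 */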

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}
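
/*
 * Drop the references that btrfs_get_logged_extents() took on any
 * entries still sitting on @logged_list.
 */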
void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}
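
/*
 * Move the collected extents onto the list of the current log
 * transaction, where btrfs_wait_logged_extents() will find them.
 */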
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}
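
/*
 * Wait for IO to complete on every extent logged in the given log
 * transaction, kicking off writeback first for buffered ranges that
 * haven't been submitted yet.
 */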
void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			struct inode *inode = ordered->inode;
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * If our ordered extent completed it means it updated the
		 * fs/subvol and csum trees already, so no need to make the
		 * current transaction's commit wait for it, as we end up
		 * holding memory unnecessarily and delaying the inode's iput
		 * until the transaction commit (we schedule an iput for the
		 * inode when the ordered extent's refcount drops to 0), which
		 * prevents it from being evictable until the transaction
		 * commits.
		 */
		if (test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags))
			btrfs_put_ordered_extent(ordered);
		else
			list_add_tail(&ordered->trans_list, &trans->ordered);

		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
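
/*
 * Drop the references for everything still on the given log
 * transaction's list without waiting for IO, e.g. when the log tree is
 * being torn down.
 */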
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
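
/*
 * Note on reference discipline: each lookup helper below that returns a
 * non-NULL ordered extent has taken a reference on it, so the caller
 * must pair it with btrfs_put_ordered_extent().  A minimal hypothetical
 * sketch:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		// inspect ordered->file_offset, ordered->len, ...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */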

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
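
/*
 * Hypothetical usage sketch (illustrative, not a caller from this
 * file): an fsync-style path that needs a byte range durable before it
 * touches metadata could do
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;	// writeback or ordered IO error
 *
 * Once this returns 0, every ordered extent overlapping the range has
 * reached BTRFS_ORDERED_COMPLETE.
 */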

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(inode, file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * outstanding i_size.  Otherwise we will not know
			 * the real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
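
/*
 * Called once at module init to create the slab cache that backs every
 * struct btrfs_ordered_extent allocated in this file.
 */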
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}