ordered-data.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

/* the end of an ordered extent, clamped so offset + len cannot wrap past -1 */
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

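/*
 * Illustrative only (hypothetical values, not a real call site): the
 * overflow clamp means an extent that would wrap the u64 offset space
 * reports the maximum offset as its end:
 *
 *	struct btrfs_ordered_extent e = {
 *		.file_offset = (u64)-4096,
 *		.len = 8192,
 *	};
 *	entry_end(&e);	returns (u64)-1 rather than a wrapped 4096
 */
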
/*
 * returns NULL if the insertion worked, or the existing node it collided
 * with in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

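/*
 * Sketch with made-up numbers: both helpers treat an entry as the
 * half-open byte range [file_offset, file_offset + len), so a range that
 * merely touches the end of an entry does not overlap it:
 *
 *	entry->file_offset = 4096, entry->len = 4096
 *	offset_in_entry(entry, 4096)       returns 1 (first byte)
 *	offset_in_entry(entry, 8191)       returns 1 (last byte)
 *	offset_in_entry(entry, 8192)       returns 0 (one past the end)
 *	range_overlaps(entry, 8192, 512)   returns 0 (ranges only touch)
 */
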
/*
 * look for the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

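/*
 * Hedged sketch of a caller (the shapes here are assumptions, not copied
 * from a real call site): after reserving an extent, a write path records
 * it as ordered so the endio code can finish the metadata later:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, ins.objectid,
 *				       ram_size, ins.offset, type);
 *	if (ret)
 *		goto out_free_reserve;
 *
 * where ins would come from btrfs_reserve_extent() and type is one of the
 * BTRFS_ORDERED_* bits (e.g. BTRFS_ORDERED_PREALLOC) or 0 for a plain
 * COW write.
 */
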
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

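/*
 * Shape of a hypothetical caller (illustration only, assuming the whole
 * range is covered by ordered extents, as a direct IO write guarantees):
 * because *file_offset advances past each accounted chunk, completing IO
 * over a range that spans several ordered extents is just a loop:
 *
 *	u64 offset = start;
 *	u64 end = start + total_len;
 *
 *	while (offset < end) {
 *		struct btrfs_ordered_extent *oe = NULL;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &oe, &offset,
 *							 end - offset, 1))
 *			finish_and_put(oe);	hypothetical completion helper
 *	}
 */
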
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

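/*
 * Usage note with a hypothetical snippet: *cached lets repeated calls for
 * the same extent skip the rbtree walk, and on the call that completes the
 * extent the caller receives a referenced pointer it must drop:
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
 *					   PAGE_SIZE, 1)) {
 *		...finish the extent, e.g. insert the file extent items...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
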
/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

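/*
 * Lifecycle sketch (hedged; the real sequencing lives in tree-log.c): the
 * log code collects referenced extents, then either splices them onto the
 * log root's list for waiting or drops the references on error:
 *
 *	LIST_HEAD(logged_list);
 *
 *	btrfs_get_logged_extents(inode, &logged_list, start, end);
 *	if (logging_succeeded)
 *		btrfs_submit_logged_extents(&logged_list, log);
 *	else
 *		btrfs_put_logged_extents(&logged_list);
 *
 * logging_succeeded is a stand-in condition, not a real variable.
 */
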
void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * In order to keep us from losing our ordered extent
		 * information when committing the transaction we have to make
		 * sure that any logged extents are completed when we go to
		 * commit the transaction.  To do this we simply increase the
		 * current transaction's pending_ordered counter and decrement
		 * it when the ordered extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

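/*
 * Reference discipline, sketched (assumed caller, not a quoted call site):
 * every lookup helper below that returns an entry has already taken a
 * reference on it, so each successful lookup pairs with one put:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		...inspect ordered->file_offset and ordered->len...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
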
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			atomic_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;
	int total_done = 0;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

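/*
 * Typical use, sketched (fsync-like callers; exact call sites vary): flush
 * and wait for everything in a byte range before depending on it being on
 * disk, treating any error as a writeback or ordered IO failure:
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;
 */
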
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this undealt
		 * i_size.  Or we will not know the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			/* each checksum is a u32, so copy whole entries */
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors * sizeof(u32));
			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

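/*
 * Sketch of the intended use (hypothetical read-time caller, illustration
 * only): when a block is read back while its checksums are still queued on
 * an ordered extent rather than committed to the csum tree, the reader can
 * pull them from here; len counts checksums requested and the return value
 * counts checksums copied:
 *
 *	u32 csum = 0;
 *
 *	if (btrfs_find_ordered_sum(inode, page_offset, disk_bytenr, &csum, 1))
 *		...compare csum against the crc32c computed from the block...
 */
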
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}