file-item.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"
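/*
 * MAX_CSUM_ITEMS bounds how many checksums a single csum item may hold:
 * what fits in a leaf after leaving room for two item headers, capped at
 * PAGE_SIZE entries.  MAX_ORDERED_SUM_BYTES is the byte range whose u32
 * checksums fit in one page alongside a struct btrfs_ordered_sum.
 */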
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)
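/*
 * Insert a BTRFS_EXTENT_DATA_KEY item describing a regular file extent at
 * file offset @pos of inode @objectid, pointing at @disk_num_bytes of
 * on-disk data starting at @disk_offset.
 */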
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
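/*
 * Find the csum item in the csum tree that covers @bytenr and return a
 * pointer to the checksum slot for that block.  Returns -EFBIG when the
 * preceding item ends exactly at @bytenr (the caller may extend it) and
 * -ENOENT when no covering item exists.
 */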
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
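/*
 * Position @path at the BTRFS_EXTENT_DATA_KEY item for (@objectid, @offset).
 * @mod < 0 searches for deletion; any non-zero @mod makes the search cow
 * the path.
 */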
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}
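/*
 * Look up the data checksums for every sector covered by @bio, first from
 * any pending ordered sums and then from the csum tree.  The results go
 * either into @dst or into the per-bio checksum buffer.  Sectors without a
 * stored checksum get a zeroed csum (and EXTENT_NODATASUM for the data
 * reloc tree).
 */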
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec *bvec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0, i;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment_all(bvec, bio, i) {
		page_bytes_left = bvec->bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}
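/*
 * Collect the stored checksums for the byte range [@start, @end] of the
 * csum tree @root into btrfs_ordered_sum structures appended to @list.
 * @search_commit reads from the commit root without taking locks.
 */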
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
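/*
 * Compute the data checksum for every sector of @bio and attach the
 * resulting btrfs_ordered_sum(s) to the matching ordered extent(s).
 * @file_start is the file offset of the first byte when @contig is set;
 * otherwise the offset is taken from each page.
 */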
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bio_vec *bvec;
	int index;
	int nr_sectors;
	int i, j;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment_all(bvec, bio, j) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec->bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec->bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec->bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec->bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}
/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(root, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
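/*
 * Write the checksums described by @sums into the csum tree, extending an
 * existing csum item when the new checksums are contiguous with it and
 * inserting new items otherwise.  Loops until every checksum in @sums has
 * been stored.
 */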
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
fail_unlock:
	goto out;
}
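/*
 * Fill in an extent_map from the file extent item that @path points at,
 * translating regular/prealloc and inline extents into the corresponding
 * block_start, lengths and compression flags.
 */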
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}