/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"
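
/*
 * Sizing limits for checksum items: __MAX_CSUM_ITEMS() is the number of
 * checksums that fit in one leaf item while leaving room for two item
 * headers, MAX_CSUM_ITEMS() additionally caps that at PAGE_SIZE, and
 * MAX_ORDERED_SUM_BYTES() is how many bytes of data a single
 * btrfs_ordered_sum can describe while keeping its checksum array within
 * one page.
 */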
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)
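
/*
 * Insert a new file extent item for @objectid at file offset @pos into
 * @root, describing a regular (BTRFS_FILE_EXTENT_REG) extent at
 * @disk_offset / @disk_num_bytes with the given encoding fields, and mark
 * the leaf dirty.
 */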
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
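
/*
 * Find the checksum item covering @bytenr in @root and return a pointer to
 * the checksum for that block inside the item.  Returns ERR_PTR(-EFBIG)
 * when the preceding item stops exactly at @bytenr (it exists but would
 * need to grow), and ERR_PTR(-ENOENT) when no covering item was found.
 */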
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
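
/*
 * Position @path at the extent data item for (@objectid, @offset).  A
 * negative @mod sets up the search for a delete, and any non-zero @mod
 * makes the search COW the path it walks.
 */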
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}
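
/*
 * Look up the stored checksum for every sector of @bio, copying the results
 * either into @dst or into the btrfs_io_bio's inline/allocated csum buffer.
 * Checksums still pending in ordered sums are taken from there first;
 * sectors with no checksum at all get a zeroed entry (and, for the data
 * relocation tree, the range is flagged EXTENT_NODATASUM).
 */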
static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec *bvec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0, i;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment_all(bvec, bio, i) {
		page_bytes_left = bvec->bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						      "no csum found for inode %llu start %llu",
						      btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
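
/*
 * Wrappers around __btrfs_lookup_bio_sums(): the plain variant stores the
 * results via @dst (or in the io_bio when @dst is NULL), while the _dio
 * variant supplies the file offset of the bio explicitly.
 */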
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
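
/*
 * Collect every checksum stored for the byte range [@start, @end] into
 * btrfs_ordered_sum entries appended to @list.  With @search_commit set the
 * lookup reads from the commit root with locking skipped.
 */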
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
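
/*
 * Checksum every sector of a write @bio and attach the results, as one or
 * more btrfs_ordered_sum structures, to the ordered extent(s) covering it.
 * @file_start is the file offset of the bio when @contig is set; otherwise
 * the offset is derived from each page.
 */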
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bio_vec *bvec;
	int index;
	int nr_sectors;
	int i, j;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment_all(bvec, bio, j) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec->bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec->bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec->bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec->bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
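
/*
 * Insert the checksums in @sums into the csum tree: reuse or grow an
 * existing csum item when the new checksums directly follow it, otherwise
 * insert a new item, looping until the whole ordered sum has been written.
 */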
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
fail_unlock:
	goto out;
}
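
/*
 * Translate the file extent item @fi that @path points to into the
 * extent_map @em, handling regular, preallocated and inline extents as well
 * as holes and compression.
 */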
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}