file-item.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"
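
/*
 * Sizing helpers for checksum items:
 * __MAX_CSUM_ITEMS() is the largest number of checksums a single csum item
 * can hold while still leaving room for a couple of item headers in the
 * leaf, MAX_CSUM_ITEMS() additionally caps that count at PAGE_SIZE, and
 * MAX_ORDERED_SUM_BYTES() is the number of data bytes a page-sized
 * struct btrfs_ordered_sum can carry checksums for.
 */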
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)
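
/*
 * Insert a fully populated regular file extent item for @objectid at file
 * offset @pos, pointing at the on-disk extent described by @disk_offset
 * and @disk_num_bytes.
 */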
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
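
/*
 * Find the checksum item covering @bytenr in the csum tree and return a
 * pointer to the slot for @bytenr inside it.  Returns ERR_PTR(-EFBIG) when
 * the item ends exactly at @bytenr (the caller may be able to extend it)
 * and ERR_PTR(-ENOENT) when no covering item exists.
 */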
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
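
/*
 * Position @path at the file extent item for (@objectid, @offset).
 * A negative @mod prepares the search for a deletion, and any non-zero
 * @mod makes the search COW the path.
 */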
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}
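
/* Free the out-of-line checksum buffer once the read bio completes. */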
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}
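
/*
 * Look up the data checksums for every sector covered by @bio.  The
 * checksums are taken from the ordered sums attached to the inode when
 * possible and from the csum tree otherwise, and are copied either into
 * @dst or into the btrfs_io_bio's inline/allocated csum buffer.  For
 * direct I/O (@dio), @logical_offset gives the file offset of the bio.
 */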
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
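
/*
 * Public entry points: the buffered read path stores the checksums in the
 * bio itself (or in @dst if one is provided), while the direct I/O variant
 * passes the file offset explicitly because it cannot be derived from the
 * pages in the bio.
 */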
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
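
/*
 * Collect all checksums for the byte range [start, end] from the csum tree
 * into a list of btrfs_ordered_sum structures appended to @list.  With
 * @search_commit set, the lookup is done against the commit root without
 * taking tree locks.
 */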
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
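
/*
 * Compute checksums for every sector of a write @bio and attach them as
 * btrfs_ordered_sum structures to the matching ordered extents.  When
 * @contig is set the bio is logically contiguous starting at @file_start;
 * otherwise the file offset is derived from each page.
 */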
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;

	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec.bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
				offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec.bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
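
/*
 * Write the checksums described by @sums into the csum tree.  An existing
 * csum item is reused or extended when @sums continues it; otherwise a new
 * item is inserted, sized to cover as much of the remaining range as the
 * leaf and the next existing item allow.
 */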
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
fail_unlock:
	goto out;
}
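
/*
 * Translate the file extent item that @path points to into the extent map
 * @em: logical range, disk block range and compression/prealloc flags.
 * When @new_inline is set, compression flags are not copied for inline
 * extents.
 */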
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;

		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}