compression.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2008 Oracle. All rights reserved.
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/bio.h>
  7. #include <linux/buffer_head.h>
  8. #include <linux/file.h>
  9. #include <linux/fs.h>
  10. #include <linux/pagemap.h>
  11. #include <linux/highmem.h>
  12. #include <linux/time.h>
  13. #include <linux/init.h>
  14. #include <linux/string.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/mpage.h>
  17. #include <linux/swap.h>
  18. #include <linux/writeback.h>
  19. #include <linux/bit_spinlock.h>
  20. #include <linux/slab.h>
  21. #include <linux/sched/mm.h>
  22. #include <linux/log2.h>
  23. #include "ctree.h"
  24. #include "disk-io.h"
  25. #include "transaction.h"
  26. #include "btrfs_inode.h"
  27. #include "volumes.h"
  28. #include "ordered-data.h"
  29. #include "compression.h"
  30. #include "extent_io.h"
  31. #include "extent_map.h"
  32. static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  33. const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  34. {
  35. switch (type) {
  36. case BTRFS_COMPRESS_ZLIB:
  37. case BTRFS_COMPRESS_LZO:
  38. case BTRFS_COMPRESS_ZSTD:
  39. case BTRFS_COMPRESS_NONE:
  40. return btrfs_compress_types[type];
  41. }
  42. return NULL;
  43. }
  44. static int btrfs_decompress_bio(struct compressed_bio *cb);
  45. static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
  46. unsigned long disk_size)
  47. {
  48. u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  49. return sizeof(struct compressed_bio) +
  50. (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
  51. }
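/*
 * Worked example (illustrative values, not taken from this file): with a
 * 4KiB sectorsize and 4-byte crc32c checksums, a 128KiB compressed extent
 * needs sizeof(struct compressed_bio) + 32 * 4 bytes, i.e. one checksum
 * slot per on-disk sector of the compressed data.
 */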
  52. static int check_compressed_csum(struct btrfs_inode *inode,
  53. struct compressed_bio *cb,
  54. u64 disk_start)
  55. {
  56. int ret;
  57. struct page *page;
  58. unsigned long i;
  59. char *kaddr;
  60. u32 csum;
  61. u32 *cb_sum = &cb->sums;
  62. if (inode->flags & BTRFS_INODE_NODATASUM)
  63. return 0;
  64. for (i = 0; i < cb->nr_pages; i++) {
  65. page = cb->compressed_pages[i];
  66. csum = ~(u32)0;
  67. kaddr = kmap_atomic(page);
  68. csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
  69. btrfs_csum_final(csum, (u8 *)&csum);
  70. kunmap_atomic(kaddr);
  71. if (csum != *cb_sum) {
  72. btrfs_print_data_csum_error(inode, disk_start, csum,
  73. *cb_sum, cb->mirror_num);
  74. ret = -EIO;
  75. goto fail;
  76. }
  77. cb_sum++;
  78. }
  79. ret = 0;
  80. fail:
  81. return ret;
  82. }
  83. /* when we finish reading compressed pages from the disk, we
  84. * decompress them and then run the bio end_io routines on the
  85. * decompressed pages (in the inode address space).
  86. *
  87. * This allows the checksumming and other IO error handling routines
  88. * to work normally
  89. *
  90. * The compressed pages are freed here, and it must be run
  91. * in process context
  92. */
  93. static void end_compressed_bio_read(struct bio *bio)
  94. {
  95. struct compressed_bio *cb = bio->bi_private;
  96. struct inode *inode;
  97. struct page *page;
  98. unsigned long index;
  99. unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
  100. int ret = 0;
  101. if (bio->bi_status)
  102. cb->errors = 1;
  103. /* if there are more bios still pending for this compressed
  104. * extent, just exit
  105. */
  106. if (!refcount_dec_and_test(&cb->pending_bios))
  107. goto out;
  108. /*
  109. * Record the correct mirror_num in cb->orig_bio so that
  110. * read-repair can work properly.
  111. */
  112. ASSERT(btrfs_io_bio(cb->orig_bio));
  113. btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
  114. cb->mirror_num = mirror;
  115. /*
116. * Some IO in this cb has failed; just skip the checksum as there
117. * is no way it could be correct.
  118. */
  119. if (cb->errors == 1)
  120. goto csum_failed;
  121. inode = cb->inode;
  122. ret = check_compressed_csum(BTRFS_I(inode), cb,
  123. (u64)bio->bi_iter.bi_sector << 9);
  124. if (ret)
  125. goto csum_failed;
126. /* ok, we're the last bio for this extent, let's start
  127. * the decompression.
  128. */
  129. ret = btrfs_decompress_bio(cb);
  130. csum_failed:
  131. if (ret)
  132. cb->errors = 1;
  133. /* release the compressed pages */
  134. index = 0;
  135. for (index = 0; index < cb->nr_pages; index++) {
  136. page = cb->compressed_pages[index];
  137. page->mapping = NULL;
  138. put_page(page);
  139. }
  140. /* do io completion on the original bio */
  141. if (cb->errors) {
  142. bio_io_error(cb->orig_bio);
  143. } else {
  144. int i;
  145. struct bio_vec *bvec;
  146. /*
  147. * we have verified the checksum already, set page
  148. * checked so the end_io handlers know about it
  149. */
  150. ASSERT(!bio_flagged(bio, BIO_CLONED));
  151. bio_for_each_segment_all(bvec, cb->orig_bio, i)
  152. SetPageChecked(bvec->bv_page);
  153. bio_endio(cb->orig_bio);
  154. }
  155. /* finally free the cb struct */
  156. kfree(cb->compressed_pages);
  157. kfree(cb);
  158. out:
  159. bio_put(bio);
  160. }
  161. /*
  162. * Clear the writeback bits on all of the file
  163. * pages for a compressed write
  164. */
  165. static noinline void end_compressed_writeback(struct inode *inode,
  166. const struct compressed_bio *cb)
  167. {
  168. unsigned long index = cb->start >> PAGE_SHIFT;
  169. unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
  170. struct page *pages[16];
  171. unsigned long nr_pages = end_index - index + 1;
  172. int i;
  173. int ret;
  174. if (cb->errors)
  175. mapping_set_error(inode->i_mapping, -EIO);
  176. while (nr_pages > 0) {
  177. ret = find_get_pages_contig(inode->i_mapping, index,
  178. min_t(unsigned long,
  179. nr_pages, ARRAY_SIZE(pages)), pages);
  180. if (ret == 0) {
  181. nr_pages -= 1;
  182. index += 1;
  183. continue;
  184. }
  185. for (i = 0; i < ret; i++) {
  186. if (cb->errors)
  187. SetPageError(pages[i]);
  188. end_page_writeback(pages[i]);
  189. put_page(pages[i]);
  190. }
  191. nr_pages -= ret;
  192. index += ret;
  193. }
  194. /* the inode may be gone now */
  195. }
  196. /*
  197. * do the cleanup once all the compressed pages hit the disk.
  198. * This will clear writeback on the file pages and free the compressed
  199. * pages.
  200. *
  201. * This also calls the writeback end hooks for the file pages so that
  202. * metadata and checksums can be updated in the file.
  203. */
  204. static void end_compressed_bio_write(struct bio *bio)
  205. {
  206. struct extent_io_tree *tree;
  207. struct compressed_bio *cb = bio->bi_private;
  208. struct inode *inode;
  209. struct page *page;
  210. unsigned long index;
  211. if (bio->bi_status)
  212. cb->errors = 1;
  213. /* if there are more bios still pending for this compressed
  214. * extent, just exit
  215. */
  216. if (!refcount_dec_and_test(&cb->pending_bios))
  217. goto out;
  218. /* ok, we're the last bio for this extent, step one is to
  219. * call back into the FS and do all the end_io operations
  220. */
  221. inode = cb->inode;
  222. tree = &BTRFS_I(inode)->io_tree;
  223. cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
  224. tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
  225. cb->start,
  226. cb->start + cb->len - 1,
  227. NULL,
  228. bio->bi_status ?
  229. BLK_STS_OK : BLK_STS_NOTSUPP);
  230. cb->compressed_pages[0]->mapping = NULL;
  231. end_compressed_writeback(inode, cb);
  232. /* note, our inode could be gone now */
  233. /*
  234. * release the compressed pages, these came from alloc_page and
  235. * are not attached to the inode at all
  236. */
  237. index = 0;
  238. for (index = 0; index < cb->nr_pages; index++) {
  239. page = cb->compressed_pages[index];
  240. page->mapping = NULL;
  241. put_page(page);
  242. }
  243. /* finally free the cb struct */
  244. kfree(cb->compressed_pages);
  245. kfree(cb);
  246. out:
  247. bio_put(bio);
  248. }
  249. /*
  250. * worker function to build and submit bios for previously compressed pages.
  251. * The corresponding pages in the inode should be marked for writeback
  252. * and the compressed pages should have a reference on them for dropping
  253. * when the IO is complete.
  254. *
  255. * This also checksums the file bytes and gets things ready for
  256. * the end io hooks.
  257. */
  258. blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
  259. unsigned long len, u64 disk_start,
  260. unsigned long compressed_len,
  261. struct page **compressed_pages,
  262. unsigned long nr_pages,
  263. unsigned int write_flags)
  264. {
  265. struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
  266. struct bio *bio = NULL;
  267. struct compressed_bio *cb;
  268. unsigned long bytes_left;
  269. struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
  270. int pg_index = 0;
  271. struct page *page;
  272. u64 first_byte = disk_start;
  273. struct block_device *bdev;
  274. blk_status_t ret;
  275. int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
  276. WARN_ON(start & ((u64)PAGE_SIZE - 1));
  277. cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
  278. if (!cb)
  279. return BLK_STS_RESOURCE;
  280. refcount_set(&cb->pending_bios, 0);
  281. cb->errors = 0;
  282. cb->inode = inode;
  283. cb->start = start;
  284. cb->len = len;
  285. cb->mirror_num = 0;
  286. cb->compressed_pages = compressed_pages;
  287. cb->compressed_len = compressed_len;
  288. cb->orig_bio = NULL;
  289. cb->nr_pages = nr_pages;
  290. bdev = fs_info->fs_devices->latest_bdev;
  291. bio = btrfs_bio_alloc(bdev, first_byte);
  292. bio->bi_opf = REQ_OP_WRITE | write_flags;
  293. bio->bi_private = cb;
  294. bio->bi_end_io = end_compressed_bio_write;
  295. refcount_set(&cb->pending_bios, 1);
  296. /* create and submit bios for the compressed pages */
  297. bytes_left = compressed_len;
  298. for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
  299. int submit = 0;
  300. page = compressed_pages[pg_index];
  301. page->mapping = inode->i_mapping;
  302. if (bio->bi_iter.bi_size)
  303. submit = io_tree->ops->merge_bio_hook(page, 0,
  304. PAGE_SIZE,
  305. bio, 0);
  306. page->mapping = NULL;
  307. if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
  308. PAGE_SIZE) {
  309. /*
  310. * inc the count before we submit the bio so
  311. * we know the end IO handler won't happen before
  312. * we inc the count. Otherwise, the cb might get
  313. * freed before we're done setting it up
  314. */
  315. refcount_inc(&cb->pending_bios);
  316. ret = btrfs_bio_wq_end_io(fs_info, bio,
  317. BTRFS_WQ_ENDIO_DATA);
  318. BUG_ON(ret); /* -ENOMEM */
  319. if (!skip_sum) {
  320. ret = btrfs_csum_one_bio(inode, bio, start, 1);
  321. BUG_ON(ret); /* -ENOMEM */
  322. }
  323. ret = btrfs_map_bio(fs_info, bio, 0, 1);
  324. if (ret) {
  325. bio->bi_status = ret;
  326. bio_endio(bio);
  327. }
  328. bio = btrfs_bio_alloc(bdev, first_byte);
  329. bio->bi_opf = REQ_OP_WRITE | write_flags;
  330. bio->bi_private = cb;
  331. bio->bi_end_io = end_compressed_bio_write;
  332. bio_add_page(bio, page, PAGE_SIZE, 0);
  333. }
  334. if (bytes_left < PAGE_SIZE) {
  335. btrfs_info(fs_info,
  336. "bytes left %lu compress len %lu nr %lu",
  337. bytes_left, cb->compressed_len, cb->nr_pages);
  338. }
  339. bytes_left -= PAGE_SIZE;
  340. first_byte += PAGE_SIZE;
  341. cond_resched();
  342. }
  343. ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
  344. BUG_ON(ret); /* -ENOMEM */
  345. if (!skip_sum) {
  346. ret = btrfs_csum_one_bio(inode, bio, start, 1);
  347. BUG_ON(ret); /* -ENOMEM */
  348. }
  349. ret = btrfs_map_bio(fs_info, bio, 0, 1);
  350. if (ret) {
  351. bio->bi_status = ret;
  352. bio_endio(bio);
  353. }
  354. return 0;
  355. }
  356. static u64 bio_end_offset(struct bio *bio)
  357. {
  358. struct bio_vec *last = bio_last_bvec_all(bio);
  359. return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
  360. }
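/*
 * Readahead helper for compressed extents: for file offsets past the end of
 * cb->orig_bio but still inside the range covered by the same compressed
 * extent (bounded by @compressed_end), allocate page cache pages and add
 * them to cb->orig_bio so a single decompression pass can fill them too.
 */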
  361. static noinline int add_ra_bio_pages(struct inode *inode,
  362. u64 compressed_end,
  363. struct compressed_bio *cb)
  364. {
  365. unsigned long end_index;
  366. unsigned long pg_index;
  367. u64 last_offset;
  368. u64 isize = i_size_read(inode);
  369. int ret;
  370. struct page *page;
  371. unsigned long nr_pages = 0;
  372. struct extent_map *em;
  373. struct address_space *mapping = inode->i_mapping;
  374. struct extent_map_tree *em_tree;
  375. struct extent_io_tree *tree;
  376. u64 end;
  377. int misses = 0;
  378. last_offset = bio_end_offset(cb->orig_bio);
  379. em_tree = &BTRFS_I(inode)->extent_tree;
  380. tree = &BTRFS_I(inode)->io_tree;
  381. if (isize == 0)
  382. return 0;
  383. end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
  384. while (last_offset < compressed_end) {
  385. pg_index = last_offset >> PAGE_SHIFT;
  386. if (pg_index > end_index)
  387. break;
  388. rcu_read_lock();
  389. page = radix_tree_lookup(&mapping->i_pages, pg_index);
  390. rcu_read_unlock();
  391. if (page && !radix_tree_exceptional_entry(page)) {
  392. misses++;
  393. if (misses > 4)
  394. break;
  395. goto next;
  396. }
  397. page = __page_cache_alloc(mapping_gfp_constraint(mapping,
  398. ~__GFP_FS));
  399. if (!page)
  400. break;
  401. if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
  402. put_page(page);
  403. goto next;
  404. }
  405. end = last_offset + PAGE_SIZE - 1;
  406. /*
  407. * at this point, we have a locked page in the page cache
  408. * for these bytes in the file. But, we have to make
  409. * sure they map to this compressed extent on disk.
  410. */
  411. set_page_extent_mapped(page);
  412. lock_extent(tree, last_offset, end);
  413. read_lock(&em_tree->lock);
  414. em = lookup_extent_mapping(em_tree, last_offset,
  415. PAGE_SIZE);
  416. read_unlock(&em_tree->lock);
  417. if (!em || last_offset < em->start ||
  418. (last_offset + PAGE_SIZE > extent_map_end(em)) ||
  419. (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
  420. free_extent_map(em);
  421. unlock_extent(tree, last_offset, end);
  422. unlock_page(page);
  423. put_page(page);
  424. break;
  425. }
  426. free_extent_map(em);
  427. if (page->index == end_index) {
  428. char *userpage;
  429. size_t zero_offset = isize & (PAGE_SIZE - 1);
  430. if (zero_offset) {
  431. int zeros;
  432. zeros = PAGE_SIZE - zero_offset;
  433. userpage = kmap_atomic(page);
  434. memset(userpage + zero_offset, 0, zeros);
  435. flush_dcache_page(page);
  436. kunmap_atomic(userpage);
  437. }
  438. }
  439. ret = bio_add_page(cb->orig_bio, page,
  440. PAGE_SIZE, 0);
  441. if (ret == PAGE_SIZE) {
  442. nr_pages++;
  443. put_page(page);
  444. } else {
  445. unlock_extent(tree, last_offset, end);
  446. unlock_page(page);
  447. put_page(page);
  448. break;
  449. }
  450. next:
  451. last_offset += PAGE_SIZE;
  452. }
  453. return 0;
  454. }
  455. /*
  456. * for a compressed read, the bio we get passed has all the inode pages
  457. * in it. We don't actually do IO on those pages but allocate new ones
  458. * to hold the compressed pages on disk.
  459. *
  460. * bio->bi_iter.bi_sector points to the compressed extent on disk
  461. * bio->bi_io_vec points to all of the inode pages
  462. *
  463. * After the compressed pages are read, we copy the bytes into the
  464. * bio we were passed and then call the bio end_io calls
  465. */
  466. blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
  467. int mirror_num, unsigned long bio_flags)
  468. {
  469. struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
  470. struct extent_io_tree *tree;
  471. struct extent_map_tree *em_tree;
  472. struct compressed_bio *cb;
  473. unsigned long compressed_len;
  474. unsigned long nr_pages;
  475. unsigned long pg_index;
  476. struct page *page;
  477. struct block_device *bdev;
  478. struct bio *comp_bio;
  479. u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
  480. u64 em_len;
  481. u64 em_start;
  482. struct extent_map *em;
  483. blk_status_t ret = BLK_STS_RESOURCE;
  484. int faili = 0;
  485. u32 *sums;
  486. tree = &BTRFS_I(inode)->io_tree;
  487. em_tree = &BTRFS_I(inode)->extent_tree;
  488. /* we need the actual starting offset of this extent in the file */
  489. read_lock(&em_tree->lock);
  490. em = lookup_extent_mapping(em_tree,
  491. page_offset(bio_first_page_all(bio)),
  492. PAGE_SIZE);
  493. read_unlock(&em_tree->lock);
  494. if (!em)
  495. return BLK_STS_IOERR;
  496. compressed_len = em->block_len;
  497. cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
  498. if (!cb)
  499. goto out;
  500. refcount_set(&cb->pending_bios, 0);
  501. cb->errors = 0;
  502. cb->inode = inode;
  503. cb->mirror_num = mirror_num;
  504. sums = &cb->sums;
  505. cb->start = em->orig_start;
  506. em_len = em->len;
  507. em_start = em->start;
  508. free_extent_map(em);
  509. em = NULL;
  510. cb->len = bio->bi_iter.bi_size;
  511. cb->compressed_len = compressed_len;
  512. cb->compress_type = extent_compress_type(bio_flags);
  513. cb->orig_bio = bio;
  514. nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
  515. cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
  516. GFP_NOFS);
  517. if (!cb->compressed_pages)
  518. goto fail1;
  519. bdev = fs_info->fs_devices->latest_bdev;
  520. for (pg_index = 0; pg_index < nr_pages; pg_index++) {
  521. cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
  522. __GFP_HIGHMEM);
  523. if (!cb->compressed_pages[pg_index]) {
  524. faili = pg_index - 1;
  525. ret = BLK_STS_RESOURCE;
  526. goto fail2;
  527. }
  528. }
  529. faili = nr_pages - 1;
  530. cb->nr_pages = nr_pages;
  531. add_ra_bio_pages(inode, em_start + em_len, cb);
532. /* include any pages we added in add_ra_bio_pages */
  533. cb->len = bio->bi_iter.bi_size;
  534. comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
535. bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
  536. comp_bio->bi_private = cb;
  537. comp_bio->bi_end_io = end_compressed_bio_read;
  538. refcount_set(&cb->pending_bios, 1);
  539. for (pg_index = 0; pg_index < nr_pages; pg_index++) {
  540. int submit = 0;
  541. page = cb->compressed_pages[pg_index];
  542. page->mapping = inode->i_mapping;
  543. page->index = em_start >> PAGE_SHIFT;
  544. if (comp_bio->bi_iter.bi_size)
  545. submit = tree->ops->merge_bio_hook(page, 0,
  546. PAGE_SIZE,
  547. comp_bio, 0);
  548. page->mapping = NULL;
  549. if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
  550. PAGE_SIZE) {
  551. ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
  552. BTRFS_WQ_ENDIO_DATA);
  553. BUG_ON(ret); /* -ENOMEM */
  554. /*
  555. * inc the count before we submit the bio so
  556. * we know the end IO handler won't happen before
  557. * we inc the count. Otherwise, the cb might get
  558. * freed before we're done setting it up
  559. */
  560. refcount_inc(&cb->pending_bios);
  561. if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
  562. ret = btrfs_lookup_bio_sums(inode, comp_bio,
  563. sums);
  564. BUG_ON(ret); /* -ENOMEM */
  565. }
  566. sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
  567. fs_info->sectorsize);
  568. ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
  569. if (ret) {
  570. comp_bio->bi_status = ret;
  571. bio_endio(comp_bio);
  572. }
  573. comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
  574. bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
  575. comp_bio->bi_private = cb;
  576. comp_bio->bi_end_io = end_compressed_bio_read;
  577. bio_add_page(comp_bio, page, PAGE_SIZE, 0);
  578. }
  579. cur_disk_byte += PAGE_SIZE;
  580. }
  581. ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
  582. BUG_ON(ret); /* -ENOMEM */
  583. if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
  584. ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
  585. BUG_ON(ret); /* -ENOMEM */
  586. }
  587. ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
  588. if (ret) {
  589. comp_bio->bi_status = ret;
  590. bio_endio(comp_bio);
  591. }
  592. return 0;
  593. fail2:
  594. while (faili >= 0) {
  595. __free_page(cb->compressed_pages[faili]);
  596. faili--;
  597. }
  598. kfree(cb->compressed_pages);
  599. fail1:
  600. kfree(cb);
  601. out:
  602. free_extent_map(em);
  603. return ret;
  604. }
  605. /*
606. * The heuristic uses systematic sampling to collect data from the input
607. * range; the logic can be tuned by the following constants:
608. *
609. * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
610. * @SAMPLING_INTERVAL - distance between the starts of consecutive samples
  611. */
  612. #define SAMPLING_READ_SIZE (16)
  613. #define SAMPLING_INTERVAL (256)
  614. /*
  615. * For statistical analysis of the input data we consider bytes that form a
  616. * Galois Field of 256 objects. Each object has an attribute count, ie. how
  617. * many times the object appeared in the sample.
  618. */
  619. #define BUCKET_SIZE (256)
  620. /*
  621. * The size of the sample is based on a statistical sampling rule of thumb.
  622. * The common way is to perform sampling tests as long as the number of
  623. * elements in each cell is at least 5.
  624. *
  625. * Instead of 5, we choose 32 to obtain more accurate results.
  626. * If the data contain the maximum number of symbols, which is 256, we obtain a
  627. * sample size bound by 8192.
  628. *
  629. * For a sample of at most 8KB of data per data range: 16 consecutive bytes
  630. * from up to 512 locations.
  631. */
  632. #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
  633. SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
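/*
 * With BTRFS_MAX_UNCOMPRESSED equal to 128KiB this evaluates to
 * 131072 * 16 / 256 = 8192 bytes, i.e. up to 512 samples of 16 bytes each,
 * matching the 8KiB bound described above.
 */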
  634. struct bucket_item {
  635. u32 count;
  636. };
  637. struct heuristic_ws {
  638. /* Partial copy of input data */
  639. u8 *sample;
  640. u32 sample_size;
  641. /* Buckets store counters for each byte value */
  642. struct bucket_item *bucket;
  643. /* Sorting buffer */
  644. struct bucket_item *bucket_b;
  645. struct list_head list;
  646. };
  647. static void free_heuristic_ws(struct list_head *ws)
  648. {
  649. struct heuristic_ws *workspace;
  650. workspace = list_entry(ws, struct heuristic_ws, list);
  651. kvfree(workspace->sample);
  652. kfree(workspace->bucket);
  653. kfree(workspace->bucket_b);
  654. kfree(workspace);
  655. }
  656. static struct list_head *alloc_heuristic_ws(void)
  657. {
  658. struct heuristic_ws *ws;
  659. ws = kzalloc(sizeof(*ws), GFP_KERNEL);
  660. if (!ws)
  661. return ERR_PTR(-ENOMEM);
  662. ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
  663. if (!ws->sample)
  664. goto fail;
  665. ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
  666. if (!ws->bucket)
  667. goto fail;
  668. ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
  669. if (!ws->bucket_b)
  670. goto fail;
  671. INIT_LIST_HEAD(&ws->list);
  672. return &ws->list;
  673. fail:
  674. free_heuristic_ws(&ws->list);
  675. return ERR_PTR(-ENOMEM);
  676. }
  677. struct workspaces_list {
  678. struct list_head idle_ws;
  679. spinlock_t ws_lock;
  680. /* Number of free workspaces */
  681. int free_ws;
  682. /* Total number of allocated workspaces */
  683. atomic_t total_ws;
  684. /* Waiters for a free workspace */
  685. wait_queue_head_t ws_wait;
  686. };
  687. static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
  688. static struct workspaces_list btrfs_heuristic_ws;
  689. static const struct btrfs_compress_op * const btrfs_compress_op[] = {
  690. &btrfs_zlib_compress,
  691. &btrfs_lzo_compress,
  692. &btrfs_zstd_compress,
  693. };
  694. void __init btrfs_init_compress(void)
  695. {
  696. struct list_head *workspace;
  697. int i;
  698. INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
  699. spin_lock_init(&btrfs_heuristic_ws.ws_lock);
  700. atomic_set(&btrfs_heuristic_ws.total_ws, 0);
  701. init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
  702. workspace = alloc_heuristic_ws();
  703. if (IS_ERR(workspace)) {
  704. pr_warn(
  705. "BTRFS: cannot preallocate heuristic workspace, will try later\n");
  706. } else {
  707. atomic_set(&btrfs_heuristic_ws.total_ws, 1);
  708. btrfs_heuristic_ws.free_ws = 1;
  709. list_add(workspace, &btrfs_heuristic_ws.idle_ws);
  710. }
  711. for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
  712. INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
  713. spin_lock_init(&btrfs_comp_ws[i].ws_lock);
  714. atomic_set(&btrfs_comp_ws[i].total_ws, 0);
  715. init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
  716. /*
  717. * Preallocate one workspace for each compression type so
  718. * we can guarantee forward progress in the worst case
  719. */
  720. workspace = btrfs_compress_op[i]->alloc_workspace();
  721. if (IS_ERR(workspace)) {
  722. pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
  723. } else {
  724. atomic_set(&btrfs_comp_ws[i].total_ws, 1);
  725. btrfs_comp_ws[i].free_ws = 1;
  726. list_add(workspace, &btrfs_comp_ws[i].idle_ws);
  727. }
  728. }
  729. }
  730. /*
  731. * This finds an available workspace or allocates a new one.
732. * If it's not possible to allocate a new one, it waits until there's one.
733. * Preallocation provides a forward progress guarantee, so we do not
734. * return errors.
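*
* Callers pair this with free_workspace(); btrfs_compress_pages() and
* btrfs_decompress_bio() below show the typical pattern:
*
*   workspace = find_workspace(type);
*   ... compress or decompress using the per-type workspace ...
*   free_workspace(type, workspace);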
  735. */
  736. static struct list_head *__find_workspace(int type, bool heuristic)
  737. {
  738. struct list_head *workspace;
  739. int cpus = num_online_cpus();
  740. int idx = type - 1;
  741. unsigned nofs_flag;
  742. struct list_head *idle_ws;
  743. spinlock_t *ws_lock;
  744. atomic_t *total_ws;
  745. wait_queue_head_t *ws_wait;
  746. int *free_ws;
  747. if (heuristic) {
  748. idle_ws = &btrfs_heuristic_ws.idle_ws;
  749. ws_lock = &btrfs_heuristic_ws.ws_lock;
  750. total_ws = &btrfs_heuristic_ws.total_ws;
  751. ws_wait = &btrfs_heuristic_ws.ws_wait;
  752. free_ws = &btrfs_heuristic_ws.free_ws;
  753. } else {
  754. idle_ws = &btrfs_comp_ws[idx].idle_ws;
  755. ws_lock = &btrfs_comp_ws[idx].ws_lock;
  756. total_ws = &btrfs_comp_ws[idx].total_ws;
  757. ws_wait = &btrfs_comp_ws[idx].ws_wait;
  758. free_ws = &btrfs_comp_ws[idx].free_ws;
  759. }
  760. again:
  761. spin_lock(ws_lock);
  762. if (!list_empty(idle_ws)) {
  763. workspace = idle_ws->next;
  764. list_del(workspace);
  765. (*free_ws)--;
  766. spin_unlock(ws_lock);
  767. return workspace;
  768. }
  769. if (atomic_read(total_ws) > cpus) {
  770. DEFINE_WAIT(wait);
  771. spin_unlock(ws_lock);
  772. prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
  773. if (atomic_read(total_ws) > cpus && !*free_ws)
  774. schedule();
  775. finish_wait(ws_wait, &wait);
  776. goto again;
  777. }
  778. atomic_inc(total_ws);
  779. spin_unlock(ws_lock);
  780. /*
  781. * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
  782. * to turn it off here because we might get called from the restricted
  783. * context of btrfs_compress_bio/btrfs_compress_pages
  784. */
  785. nofs_flag = memalloc_nofs_save();
  786. if (heuristic)
  787. workspace = alloc_heuristic_ws();
  788. else
  789. workspace = btrfs_compress_op[idx]->alloc_workspace();
  790. memalloc_nofs_restore(nofs_flag);
  791. if (IS_ERR(workspace)) {
  792. atomic_dec(total_ws);
  793. wake_up(ws_wait);
  794. /*
  795. * Do not return the error but go back to waiting. There's a
  796. * workspace preallocated for each type and the compression
  797. * time is bounded so we get to a workspace eventually. This
  798. * makes our caller's life easier.
  799. *
  800. * To prevent silent and low-probability deadlocks (when the
  801. * initial preallocation fails), check if there are any
  802. * workspaces at all.
  803. */
  804. if (atomic_read(total_ws) == 0) {
  805. static DEFINE_RATELIMIT_STATE(_rs,
  806. /* once per minute */ 60 * HZ,
  807. /* no burst */ 1);
  808. if (__ratelimit(&_rs)) {
  809. pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
  810. }
  811. }
  812. goto again;
  813. }
  814. return workspace;
  815. }
  816. static struct list_head *find_workspace(int type)
  817. {
  818. return __find_workspace(type, false);
  819. }
  820. /*
  821. * put a workspace struct back on the list or free it if we have enough
  822. * idle ones sitting around
  823. */
  824. static void __free_workspace(int type, struct list_head *workspace,
  825. bool heuristic)
  826. {
  827. int idx = type - 1;
  828. struct list_head *idle_ws;
  829. spinlock_t *ws_lock;
  830. atomic_t *total_ws;
  831. wait_queue_head_t *ws_wait;
  832. int *free_ws;
  833. if (heuristic) {
  834. idle_ws = &btrfs_heuristic_ws.idle_ws;
  835. ws_lock = &btrfs_heuristic_ws.ws_lock;
  836. total_ws = &btrfs_heuristic_ws.total_ws;
  837. ws_wait = &btrfs_heuristic_ws.ws_wait;
  838. free_ws = &btrfs_heuristic_ws.free_ws;
  839. } else {
  840. idle_ws = &btrfs_comp_ws[idx].idle_ws;
  841. ws_lock = &btrfs_comp_ws[idx].ws_lock;
  842. total_ws = &btrfs_comp_ws[idx].total_ws;
  843. ws_wait = &btrfs_comp_ws[idx].ws_wait;
  844. free_ws = &btrfs_comp_ws[idx].free_ws;
  845. }
  846. spin_lock(ws_lock);
  847. if (*free_ws <= num_online_cpus()) {
  848. list_add(workspace, idle_ws);
  849. (*free_ws)++;
  850. spin_unlock(ws_lock);
  851. goto wake;
  852. }
  853. spin_unlock(ws_lock);
  854. if (heuristic)
  855. free_heuristic_ws(workspace);
  856. else
  857. btrfs_compress_op[idx]->free_workspace(workspace);
  858. atomic_dec(total_ws);
  859. wake:
  860. /*
  861. * Make sure counter is updated before we wake up waiters.
  862. */
  863. smp_mb();
  864. if (waitqueue_active(ws_wait))
  865. wake_up(ws_wait);
  866. }
  867. static void free_workspace(int type, struct list_head *ws)
  868. {
  869. return __free_workspace(type, ws, false);
  870. }
  871. /*
  872. * cleanup function for module exit
  873. */
  874. static void free_workspaces(void)
  875. {
  876. struct list_head *workspace;
  877. int i;
  878. while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
  879. workspace = btrfs_heuristic_ws.idle_ws.next;
  880. list_del(workspace);
  881. free_heuristic_ws(workspace);
  882. atomic_dec(&btrfs_heuristic_ws.total_ws);
  883. }
  884. for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
  885. while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
  886. workspace = btrfs_comp_ws[i].idle_ws.next;
  887. list_del(workspace);
  888. btrfs_compress_op[i]->free_workspace(workspace);
  889. atomic_dec(&btrfs_comp_ws[i].total_ws);
  890. }
  891. }
  892. }
  893. /*
  894. * Given an address space and start and length, compress the bytes into @pages
  895. * that are allocated on demand.
  896. *
  897. * @type_level is encoded algorithm and level, where level 0 means whatever
  898. * default the algorithm chooses and is opaque here;
899. * - the compression algorithm is stored in the low 4 bits (valid values 0-3)
900. * - the level is stored in bits 4-7
  901. *
  902. * @out_pages is an in/out parameter, holds maximum number of pages to allocate
  903. * and returns number of actually allocated pages
  904. *
  905. * @total_in is used to return the number of bytes actually read. It
  906. * may be smaller than the input length if we had to exit early because we
907. * ran out of room in the pages array or because we crossed the
  908. * max_out threshold.
  909. *
  910. * @total_out is an in/out parameter, must be set to the input length and will
  911. * be also used to return the total number of compressed bytes
  912. *
  913. * @max_out tells us the max number of bytes that we're allowed to
  914. * stuff into pages
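*
* Example encoding (illustration only): zlib at level 9 is passed as
* type_level = (9 << 4) | BTRFS_COMPRESS_ZLIB == 0x91; the low nibble is
* masked off below to pick the algorithm and set_level() consumes the
* level bits.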
  915. */
  916. int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
  917. u64 start, struct page **pages,
  918. unsigned long *out_pages,
  919. unsigned long *total_in,
  920. unsigned long *total_out)
  921. {
  922. struct list_head *workspace;
  923. int ret;
  924. int type = type_level & 0xF;
  925. workspace = find_workspace(type);
  926. btrfs_compress_op[type - 1]->set_level(workspace, type_level);
  927. ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
  928. start, pages,
  929. out_pages,
  930. total_in, total_out);
  931. free_workspace(type, workspace);
  932. return ret;
  933. }
  934. /*
  935. * pages_in is an array of pages with compressed data.
  936. *
  937. * disk_start is the starting logical offset of this array in the file
  938. *
  939. * orig_bio contains the pages from the file that we want to decompress into
  940. *
  941. * srclen is the number of bytes in pages_in
  942. *
  943. * The basic idea is that we have a bio that was created by readpages.
  944. * The pages in the bio are for the uncompressed data, and they may not
  945. * be contiguous. They all correspond to the range of bytes covered by
  946. * the compressed extent.
  947. */
  948. static int btrfs_decompress_bio(struct compressed_bio *cb)
  949. {
  950. struct list_head *workspace;
  951. int ret;
  952. int type = cb->compress_type;
  953. workspace = find_workspace(type);
  954. ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
  955. free_workspace(type, workspace);
  956. return ret;
  957. }
  958. /*
  959. * a less complex decompression routine. Our compressed data fits in a
  960. * single page, and we want to read a single page out of it.
  961. * start_byte tells us the offset into the compressed data we're interested in
  962. */
  963. int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
  964. unsigned long start_byte, size_t srclen, size_t destlen)
  965. {
  966. struct list_head *workspace;
  967. int ret;
  968. workspace = find_workspace(type);
  969. ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
  970. dest_page, start_byte,
  971. srclen, destlen);
  972. free_workspace(type, workspace);
  973. return ret;
  974. }
  975. void __cold btrfs_exit_compress(void)
  976. {
  977. free_workspaces();
  978. }
  979. /*
  980. * Copy uncompressed data from working buffer to pages.
  981. *
982. * buf_start is the offset of the start of the working buffer within the uncompressed data
983. *
984. * total_out is the offset just past the last byte in the working buffer, i.e. the total number of uncompressed bytes produced so far
  985. */
  986. int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
  987. unsigned long total_out, u64 disk_start,
  988. struct bio *bio)
  989. {
  990. unsigned long buf_offset;
  991. unsigned long current_buf_start;
  992. unsigned long start_byte;
  993. unsigned long prev_start_byte;
  994. unsigned long working_bytes = total_out - buf_start;
  995. unsigned long bytes;
  996. char *kaddr;
  997. struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
  998. /*
  999. * start byte is the first byte of the page we're currently
  1000. * copying into relative to the start of the compressed data.
  1001. */
  1002. start_byte = page_offset(bvec.bv_page) - disk_start;
  1003. /* we haven't yet hit data corresponding to this page */
  1004. if (total_out <= start_byte)
  1005. return 1;
  1006. /*
  1007. * the start of the data we care about is offset into
  1008. * the middle of our working buffer
  1009. */
  1010. if (total_out > start_byte && buf_start < start_byte) {
  1011. buf_offset = start_byte - buf_start;
  1012. working_bytes -= buf_offset;
  1013. } else {
  1014. buf_offset = 0;
  1015. }
  1016. current_buf_start = buf_start;
  1017. /* copy bytes from the working buffer into the pages */
  1018. while (working_bytes > 0) {
  1019. bytes = min_t(unsigned long, bvec.bv_len,
  1020. PAGE_SIZE - buf_offset);
  1021. bytes = min(bytes, working_bytes);
  1022. kaddr = kmap_atomic(bvec.bv_page);
  1023. memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
  1024. kunmap_atomic(kaddr);
  1025. flush_dcache_page(bvec.bv_page);
  1026. buf_offset += bytes;
  1027. working_bytes -= bytes;
  1028. current_buf_start += bytes;
  1029. /* check if we need to pick another page */
  1030. bio_advance(bio, bytes);
  1031. if (!bio->bi_iter.bi_size)
  1032. return 0;
  1033. bvec = bio_iter_iovec(bio, bio->bi_iter);
  1034. prev_start_byte = start_byte;
  1035. start_byte = page_offset(bvec.bv_page) - disk_start;
  1036. /*
  1037. * We need to make sure we're only adjusting
1038. * our offset into the compression working buffer when
  1039. * we're switching pages. Otherwise we can incorrectly
  1040. * keep copying when we were actually done.
  1041. */
  1042. if (start_byte != prev_start_byte) {
  1043. /*
  1044. * make sure our new page is covered by this
  1045. * working buffer
  1046. */
  1047. if (total_out <= start_byte)
  1048. return 1;
  1049. /*
  1050. * the next page in the biovec might not be adjacent
  1051. * to the last page, but it might still be found
  1052. * inside this working buffer. bump our offset pointer
  1053. */
  1054. if (total_out > start_byte &&
  1055. current_buf_start < start_byte) {
  1056. buf_offset = start_byte - buf_start;
  1057. working_bytes = total_out - start_byte;
  1058. current_buf_start = buf_start + buf_offset;
  1059. }
  1060. }
  1061. }
  1062. return 1;
  1063. }
  1064. /*
  1065. * Shannon Entropy calculation
  1066. *
1067. * Pure byte distribution analysis fails to determine compressibility of data.
  1068. * Try calculating entropy to estimate the average minimum number of bits
  1069. * needed to encode the sampled data.
  1070. *
  1071. * For convenience, return the percentage of needed bits, instead of amount of
  1072. * bits directly.
  1073. *
  1074. * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
  1075. * and can be compressible with high probability
  1076. *
  1077. * @ENTROPY_LVL_HIGH - data are not compressible with high probability
  1078. *
  1079. * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
  1080. */
  1081. #define ENTROPY_LVL_ACEPTABLE (65)
  1082. #define ENTROPY_LVL_HIGH (80)
  1083. /*
1084. * For increased precision in the shannon_entropy calculation,
1085. * let's do pow(n, M) to keep more digits after the decimal point:
  1086. *
  1087. * - maximum int bit length is 64
  1088. * - ilog2(MAX_SAMPLE_SIZE) -> 13
  1089. * - 13 * 4 = 52 < 64 -> M = 4
  1090. *
  1091. * So use pow(n, 4).
  1092. */
  1093. static inline u32 ilog2_w(u64 n)
  1094. {
  1095. return ilog2(n * n * n * n);
  1096. }
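/*
 * ilog2_w(n) is therefore floor(4 * log2(n)), i.e. log2 in quarter-bit
 * units, so entropy_max below is 8 * ilog2_w(2) = 8 * 4 = 32 (8 bits).
 * As a sanity check of the math: a sample in which every byte value occurs
 * equally often scores close to 100, while a constant sample scores 0.
 */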
  1097. static u32 shannon_entropy(struct heuristic_ws *ws)
  1098. {
  1099. const u32 entropy_max = 8 * ilog2_w(2);
  1100. u32 entropy_sum = 0;
  1101. u32 p, p_base, sz_base;
  1102. u32 i;
  1103. sz_base = ilog2_w(ws->sample_size);
  1104. for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
  1105. p = ws->bucket[i].count;
  1106. p_base = ilog2_w(p);
  1107. entropy_sum += p * (sz_base - p_base);
  1108. }
  1109. entropy_sum /= ws->sample_size;
  1110. return entropy_sum * 100 / entropy_max;
  1111. }
  1112. #define RADIX_BASE 4U
  1113. #define COUNTERS_SIZE (1U << RADIX_BASE)
  1114. static u8 get4bits(u64 num, int shift) {
  1115. u8 low4bits;
  1116. num >>= shift;
  1117. /* Reverse order */
  1118. low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
  1119. return low4bits;
  1120. }
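/*
 * Complementing the nibble in get4bits() makes radix_sort() below produce
 * the buckets in descending order of count, which is what
 * byte_core_set_size() relies on when it walks the sorted array.
 */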
  1121. /*
  1122. * Use 4 bits as radix base
1123. * Use 16 u32 counters for calculating the new position in the buf array
  1124. *
  1125. * @array - array that will be sorted
  1126. * @array_buf - buffer array to store sorting results
  1127. * must be equal in size to @array
  1128. * @num - array size
  1129. */
  1130. static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
  1131. int num)
  1132. {
  1133. u64 max_num;
  1134. u64 buf_num;
  1135. u32 counters[COUNTERS_SIZE];
  1136. u32 new_addr;
  1137. u32 addr;
  1138. int bitlen;
  1139. int shift;
  1140. int i;
  1141. /*
1142. * Try to avoid useless loop iterations for small numbers stored in big
  1143. * counters. Example: 48 33 4 ... in 64bit array
  1144. */
  1145. max_num = array[0].count;
  1146. for (i = 1; i < num; i++) {
  1147. buf_num = array[i].count;
  1148. if (buf_num > max_num)
  1149. max_num = buf_num;
  1150. }
  1151. buf_num = ilog2(max_num);
  1152. bitlen = ALIGN(buf_num, RADIX_BASE * 2);
  1153. shift = 0;
  1154. while (shift < bitlen) {
  1155. memset(counters, 0, sizeof(counters));
  1156. for (i = 0; i < num; i++) {
  1157. buf_num = array[i].count;
  1158. addr = get4bits(buf_num, shift);
  1159. counters[addr]++;
  1160. }
  1161. for (i = 1; i < COUNTERS_SIZE; i++)
  1162. counters[i] += counters[i - 1];
  1163. for (i = num - 1; i >= 0; i--) {
  1164. buf_num = array[i].count;
  1165. addr = get4bits(buf_num, shift);
  1166. counters[addr]--;
  1167. new_addr = counters[addr];
  1168. array_buf[new_addr] = array[i];
  1169. }
  1170. shift += RADIX_BASE;
  1171. /*
1172. * A normal radix sort moves the data from the temporary array back to
1173. * the main one, but that costs CPU time. Avoid the copy by doing
1174. * another sort iteration into the original array instead of a
1175. * memcpy().
  1176. */
  1177. memset(counters, 0, sizeof(counters));
1178. for (i = 0; i < num; i++) {
  1179. buf_num = array_buf[i].count;
  1180. addr = get4bits(buf_num, shift);
  1181. counters[addr]++;
  1182. }
  1183. for (i = 1; i < COUNTERS_SIZE; i++)
  1184. counters[i] += counters[i - 1];
  1185. for (i = num - 1; i >= 0; i--) {
  1186. buf_num = array_buf[i].count;
  1187. addr = get4bits(buf_num, shift);
  1188. counters[addr]--;
  1189. new_addr = counters[addr];
  1190. array[new_addr] = array_buf[i];
  1191. }
  1192. shift += RADIX_BASE;
  1193. }
  1194. }
  1195. /*
  1196. * Size of the core byte set - how many bytes cover 90% of the sample
  1197. *
  1198. * There are several types of structured binary data that use nearly all byte
  1199. * values. The distribution can be uniform and counts in all buckets will be
1200. * nearly the same (e.g. encrypted data). Such data are unlikely to be compressible.
1201. *
1202. * Another possibility is a normal (Gaussian) distribution, where the data could
1203. * be potentially compressible, but we have to take a few more steps to decide
1204. * how much.
1205. *
1206. * @BYTE_CORE_SET_LOW - the bulk of the data is made up of a few frequently
1207. * repeated byte values; a compression algorithm can easily exploit that
1208. * @BYTE_CORE_SET_HIGH - the data have a nearly uniform distribution and with
1209. * high probability are not compressible
  1210. */
  1211. #define BYTE_CORE_SET_LOW (64)
  1212. #define BYTE_CORE_SET_HIGH (200)
  1213. static int byte_core_set_size(struct heuristic_ws *ws)
  1214. {
  1215. u32 i;
  1216. u32 coreset_sum = 0;
  1217. const u32 core_set_threshold = ws->sample_size * 90 / 100;
  1218. struct bucket_item *bucket = ws->bucket;
  1219. /* Sort in reverse order */
  1220. radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
  1221. for (i = 0; i < BYTE_CORE_SET_LOW; i++)
  1222. coreset_sum += bucket[i].count;
  1223. if (coreset_sum > core_set_threshold)
  1224. return i;
  1225. for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
  1226. coreset_sum += bucket[i].count;
  1227. if (coreset_sum > core_set_threshold)
  1228. break;
  1229. }
  1230. return i;
  1231. }
  1232. /*
  1233. * Count byte values in buckets.
  1234. * This heuristic can detect textual data (configs, xml, json, html, etc).
1235. * In most text-like data the byte set is restricted to a limited number of
1236. * possible characters, and that restriction in most cases makes the data
1237. * easy to compress.
  1238. *
  1239. * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
  1240. * less - compressible
  1241. * more - need additional analysis
  1242. */
  1243. #define BYTE_SET_THRESHOLD (64)
  1244. static u32 byte_set_size(const struct heuristic_ws *ws)
  1245. {
  1246. u32 i;
  1247. u32 byte_set_size = 0;
  1248. for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
  1249. if (ws->bucket[i].count > 0)
  1250. byte_set_size++;
  1251. }
  1252. /*
  1253. * Continue collecting count of byte values in buckets. If the byte
1254. * set size is bigger than the threshold, it's pointless to continue,
  1255. * the detection technique would fail for this type of data.
  1256. */
  1257. for (; i < BUCKET_SIZE; i++) {
  1258. if (ws->bucket[i].count > 0) {
  1259. byte_set_size++;
  1260. if (byte_set_size > BYTE_SET_THRESHOLD)
  1261. return byte_set_size;
  1262. }
  1263. }
  1264. return byte_set_size;
  1265. }
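/*
 * Cheap detector for trivially repetitive data: if the second half of the
 * sample is identical to the first half, assume the range compresses well.
 */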
  1266. static bool sample_repeated_patterns(struct heuristic_ws *ws)
  1267. {
  1268. const u32 half_of_sample = ws->sample_size / 2;
  1269. const u8 *data = ws->sample;
  1270. return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
  1271. }
  1272. static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
  1273. struct heuristic_ws *ws)
  1274. {
  1275. struct page *page;
  1276. u64 index, index_end;
  1277. u32 i, curr_sample_pos;
  1278. u8 *in_data;
  1279. /*
  1280. * Compression handles the input data by chunks of 128KiB
  1281. * (defined by BTRFS_MAX_UNCOMPRESSED)
  1282. *
  1283. * We do the same for the heuristic and loop over the whole range.
  1284. *
  1285. * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
  1286. * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
  1287. */
  1288. if (end - start > BTRFS_MAX_UNCOMPRESSED)
  1289. end = start + BTRFS_MAX_UNCOMPRESSED;
  1290. index = start >> PAGE_SHIFT;
  1291. index_end = end >> PAGE_SHIFT;
  1292. /* Don't miss unaligned end */
  1293. if (!IS_ALIGNED(end, PAGE_SIZE))
  1294. index_end++;
  1295. curr_sample_pos = 0;
  1296. while (index < index_end) {
  1297. page = find_get_page(inode->i_mapping, index);
  1298. in_data = kmap(page);
  1299. /* Handle case where the start is not aligned to PAGE_SIZE */
  1300. i = start % PAGE_SIZE;
  1301. while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
  1302. /* Don't sample any garbage from the last page */
  1303. if (start > end - SAMPLING_READ_SIZE)
  1304. break;
  1305. memcpy(&ws->sample[curr_sample_pos], &in_data[i],
  1306. SAMPLING_READ_SIZE);
  1307. i += SAMPLING_INTERVAL;
  1308. start += SAMPLING_INTERVAL;
  1309. curr_sample_pos += SAMPLING_READ_SIZE;
  1310. }
  1311. kunmap(page);
  1312. put_page(page);
  1313. index++;
  1314. }
  1315. ws->sample_size = curr_sample_pos;
  1316. }
  1317. /*
  1318. * Compression heuristic.
  1319. *
1320. * For now it's a naive and optimistic 'return true', we'll extend the logic to
  1321. * quickly (compared to direct compression) detect data characteristics
  1322. * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
  1323. * data.
  1324. *
  1325. * The following types of analysis can be performed:
  1326. * - detect mostly zero data
  1327. * - detect data with low "byte set" size (text, etc)
  1328. * - detect data with low/high "core byte" set
  1329. *
  1330. * Return non-zero if the compression should be done, 0 otherwise.
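*
* The distinct non-zero values returned below only record which check fired
* (repeated pattern, small byte set, small core set, low entropy); per the
* contract above, callers only check for zero vs non-zero.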
  1331. */
  1332. int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
  1333. {
  1334. struct list_head *ws_list = __find_workspace(0, true);
  1335. struct heuristic_ws *ws;
  1336. u32 i;
  1337. u8 byte;
  1338. int ret = 0;
  1339. ws = list_entry(ws_list, struct heuristic_ws, list);
  1340. heuristic_collect_sample(inode, start, end, ws);
  1341. if (sample_repeated_patterns(ws)) {
  1342. ret = 1;
  1343. goto out;
  1344. }
  1345. memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
  1346. for (i = 0; i < ws->sample_size; i++) {
  1347. byte = ws->sample[i];
  1348. ws->bucket[byte].count++;
  1349. }
  1350. i = byte_set_size(ws);
  1351. if (i < BYTE_SET_THRESHOLD) {
  1352. ret = 2;
  1353. goto out;
  1354. }
  1355. i = byte_core_set_size(ws);
  1356. if (i <= BYTE_CORE_SET_LOW) {
  1357. ret = 3;
  1358. goto out;
  1359. }
  1360. if (i >= BYTE_CORE_SET_HIGH) {
  1361. ret = 0;
  1362. goto out;
  1363. }
  1364. i = shannon_entropy(ws);
  1365. if (i <= ENTROPY_LVL_ACEPTABLE) {
  1366. ret = 4;
  1367. goto out;
  1368. }
  1369. /*
  1370. * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
  1371. * needed to give green light to compression.
  1372. *
  1373. * For now just assume that compression at that level is not worth the
  1374. * resources because:
  1375. *
  1376. * 1. it is possible to defrag the data later
  1377. *
1378. * 2. the data would turn out to be hardly compressible, e.g. 150 distinct byte
1379. * values with every bucket count at a level of ~54. The heuristic would
  1380. * be confused. This can happen when data have some internal repeated
  1381. * patterns like "abbacbbc...". This can be detected by analyzing
  1382. * pairs of bytes, which is too costly.
  1383. */
  1384. if (i < ENTROPY_LVL_HIGH) {
  1385. ret = 5;
  1386. goto out;
  1387. } else {
  1388. ret = 0;
  1389. goto out;
  1390. }
  1391. out:
  1392. __free_workspace(0, ws_list, true);
  1393. return ret;
  1394. }
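/*
 * Parse a compression level from a string such as the compress= mount
 * option value, e.g. "zlib:3" returns 3, a bare "zlib" (or an out-of-range
 * level) returns BTRFS_ZLIB_DEFAULT_LEVEL, and any string not starting
 * with "zlib" returns 0.
 */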
  1395. unsigned int btrfs_compress_str2level(const char *str)
  1396. {
  1397. if (strncmp(str, "zlib", 4) != 0)
  1398. return 0;
  1399. /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
  1400. if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
  1401. return str[5] - '0';
  1402. return BTRFS_ZLIB_DEFAULT_LEVEL;
  1403. }