request.c

/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;

	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	/* bound by the stack buffer, not PAGE_SIZE */
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);

	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);

	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);

	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);

	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);

	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);

	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;

	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */
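
/*
 * The insert path runs as a closure in stages: bch_data_insert() ->
 * bch_data_insert_start() (write the data to the cache and build up the
 * keys) -> bch_data_insert_keys() (journal the keys, then insert them into
 * the btree). Each stage may requeue itself on bcache_wq when it has to
 * wait for keylist memory or for a journal write.
 */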
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys
	 * created so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
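
/*
 * Bypass path: instead of writing data into the cache, insert bare keys
 * (keys with no pointers) covering the bio's range; inserting them
 * invalidates whatever stale data the cache holds for that region.
 */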
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
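
/*
 * Write the data to the cache: allocate sectors, split the bio to fit each
 * allocation, build a key per fragment (plus a checksum if op->csum is
 * set), and submit the fragments to the cache device; once all the data
 * has been submitted, continue to bch_data_insert_keys() to make the keys
 * visible.
 */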
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 2 u64s for the key header, 1 for the device pointer,
		 * and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash-only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */
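
/*
 * Returns 0 when the cache set isn't congested; otherwise returns a
 * sector-count threshold that check_should_bypass() compares against a
 * task's recent sequential I/O. c->congested is driven negative by I/Os
 * that complete slower than the configured thresholds and decays back
 * toward zero over time (roughly one unit per millisecond here); the
 * deeper the congestion, the smaller the returned threshold, so more I/O
 * bypasses the cache. The random popcount subtraction adds a little
 * jitter to the threshold.
 */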
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
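
/*
 * Decide whether a bio should skip the cache entirely. Reasons to bypass:
 * the device is detaching, the cache is more than CUTOFF_CACHE_ADD (95%)
 * full, the bio is a discard, the cache mode forbids caching it, or it's
 * misaligned with the cache's block size. Otherwise, track the bio in a
 * small hash of recent I/Os to detect sequential streams: bypass when a
 * task's sequential I/O exceeds dc->sequential_cutoff, or exceeds the
 * (smaller under load) threshold returned by bch_get_congested().
 */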
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */
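
/*
 * One struct search is allocated per request in search_alloc() and lives
 * until search_free(); it embeds the closure the whole request runs under,
 * the cloned bio that gets consumed by the cache lookup, and the
 * data_insert_op used when the request needs to insert (or invalidate)
 * data in the cache.
 */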
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;

	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error, so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
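
/*
 * Handle the portion of a read the cache doesn't have. Unless the bio was
 * marked readahead/metadata or the cache is nearly full, extend the miss
 * by up to dc->readahead sectors. A check key covering the miss region is
 * inserted first, so the later data insert only succeeds if nothing raced
 * with us (s->iop.replace); cache_bio then reads the data from the backing
 * device into bounce pages, to be copied to the original bio and inserted
 * into the cache in cached_dev_read_done().
 */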
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
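
/*
 * Route a write to the right place. Writes overlapping dirty data that's
 * undergoing background writeback are forced to writeback mode (writing
 * around dirty data would leave it stale), and should_writeback() may
 * promote others. Three outcomes: bypass - write only to the backing
 * device and invalidate the region in the cache; writeback - write only
 * to the cache, marked dirty (mirroring any flush to the backing device);
 * writethrough - write to the backing device and insert a clone into the
 * cache.
 */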
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, iter) {
		unsigned j = min(bv.bv_len >> 9, sectors);

		void *p = kmap(bv.bv_page);
		memset(p + bv.bv_offset, 0, j << 9);
		kunmap(bv.bv_page);

		sectors -= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}
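
/*
 * Flash-only volumes have no backing device: reads that miss are
 * zero-filled by flash_dev_cache_miss(), and writes go straight into the
 * cache via bch_data_insert() with writeback set (discards become
 * invalidations via the bypass flag).
 */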
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}